Пример #1
0
def test_label_small_hyro(generators_eia860_data, test_settings,
                          plant_region_map_ipm_data):
    """Small hydro plants should be relabeled with the expected capacity."""
    # Build the IPM-region -> model-region lookup and attach it to each plant.
    agg_map = reverse_dict_of_lists(
        test_settings.get("region_aggregations", {}))
    region_df = map_agg_region_names(
        df=plant_region_map_ipm_data,
        region_agg_map=agg_map,
        original_col_name="region",
        new_col_name="model_region",
    )
    merged = pd.merge(generators_eia860_data,
                      region_df,
                      on="plant_id_eia",
                      how="left")
    logger.info(merged[["plant_id_eia", "technology_description",
                        "model_region"]].head())

    labeled = label_small_hydro(merged,
                                test_settings,
                                by=["plant_id_eia", "report_date"])
    print(labeled.query("plant_id_eia==34"))
    logger.info(labeled["technology_description"].unique())

    techs = labeled["technology_description"].unique()
    assert "Small Hydroelectric" in techs
    small_hydro_cap = labeled.loc[
        labeled.technology_description == "Small Hydroelectric", "capacity_mw"]
    assert np.allclose(small_hydro_cap, 12.1)
Пример #2
0
def add_load_growth(load_curves, settings):
    """Scale regional hourly load to the model year using growth rates.

    Parameters
    ----------
    load_curves : DataFrame
        Hourly load with columns "region_id_epaipm" and "load_mw". Modified in
        place and also returned.
    settings : dict
        Must contain "load_region_map", "default_growth_rates", "model_year",
        and either "regular_load_growth_start_year" (together with
        "aeo_hist_start_elec_demand" and "aeo_hist_end_elec_demand") or
        "default_load_year". "alt_growth_rate" may override rates by region.

    Returns
    -------
    DataFrame
        The load curves with "load_mw" scaled by historical and compound
        future growth factors.

    Raises
    ------
    KeyError
        If a region in the historical start-demand dict is missing from the
        end-demand dict.
    """
    load_map = reverse_dict_of_lists(settings["load_region_map"])

    # Default annual growth rate for each IPM region, then user overrides.
    load_growth_map = {
        ipm_region: settings["default_growth_rates"][load_region]
        for ipm_region, load_region in load_map.items()
    }
    if settings["alt_growth_rate"] is not None:
        for region, rate in settings["alt_growth_rate"].items():
            load_growth_map[region] = rate

    if "regular_load_growth_start_year" in settings:
        # Scale by the observed historical demand ratio first, then only
        # compound annual growth over years after the historical record ends.
        demand_start = settings["aeo_hist_start_elec_demand"]
        demand_end = settings["aeo_hist_end_elec_demand"]

        # Every region with a starting demand needs an ending demand.
        # BUG FIX: the original message contained "/n" instead of "\n".
        if not set(demand_start) <= set(demand_end):
            raise KeyError(
                "Error in keys for historical electricity demand.\n"
                "Not all keys in 'aeo_hist_start_elec_demand' are also in "
                "'aeo_hist_end_elec_demand'")

        historic_growth_ratio = {
            region: demand_end[region] / demand_start[region]
            for region in demand_start
        }
        historic_growth_map = {
            ipm_region: historic_growth_ratio[load_region]
            for ipm_region, load_region in load_map.items()
        }

        for region in load_curves["region_id_epaipm"].unique():
            hist_growth_factor = historic_growth_map[region]
            load_curves.loc[load_curves["region_id_epaipm"] == region,
                            "load_mw"] *= hist_growth_factor

        # Don't grow load over years where we already have historical data
        years_growth = (settings["model_year"] -
                        settings["regular_load_growth_start_year"])

    else:
        years_growth = settings["model_year"] - settings["default_load_year"]

    # Compound each region's annual rate over the growth window.
    load_growth_factor = {
        region: (1 + rate)**years_growth
        for region, rate in load_growth_map.items()
    }

    for region in load_curves["region_id_epaipm"].unique():
        growth_factor = load_growth_factor[region]
        load_curves.loc[load_curves["region_id_epaipm"] == region,
                        "load_mw"] *= growth_factor

    return load_curves
Пример #3
0
def overwrite_wind_pv_capacity(df, settings):
    """Replace EIA860 wind and solar PV capacity with externally supplied data.

    Parameters
    ----------
    df : DataFrame
        Existing generators indexed by "region", "technology", and "cluster",
        with an "Existing_Cap_MW" column. The technologies should include
        "Solar Photovoltaic" and "Onshore Wind Turbine".
    settings : dict
        User defined PowerGenome settings. Must have the keys "input_folder"
        and "region_wind_pv_cap_fn".

    Returns
    -------
    DataFrame
        Same as the input dataframe but with capacity overwritten for any
        region/technology pairs present in the external file.
    """
    from powergenome.util import reverse_dict_of_lists

    idx = pd.IndexSlice

    cap_path = settings["input_folder"] / settings["region_wind_pv_cap_fn"]
    ipm_cap = pd.read_csv(cap_path)

    agg_map = reverse_dict_of_lists(settings.get("region_aggregations", {}))

    # Default every row's model_region to its IPM region, then remap the rows
    # whose IPM region belongs to a user-defined aggregation.
    ipm_cap["model_region"] = ipm_cap["IPM_Region"]
    agg_rows = ipm_cap["IPM_Region"].isin(agg_map)
    ipm_cap.loc[agg_rows, "model_region"] = ipm_cap.loc[
        agg_rows, "IPM_Region"].map(agg_map)
    model_cap = ipm_cap.groupby(["model_region", "technology"]).sum()

    df = df.reset_index()

    for region in df["region"].unique():
        region_techs = df.query("region == @region")["technology"].to_list()
        for tech in ["Solar Photovoltaic", "Onshore Wind Turbine"]:
            if tech in region_techs:
                mask = (df["region"] == region) & (df["technology"] == tech)
                df.loc[mask, "Existing_Cap_MW"] = model_cap.loc[
                    idx[region, tech], "nameplate_capacity_mw"]

    return df.set_index(["region", "technology", "cluster"])
Пример #4
0
def make_load_curves(
    pudl_engine,
    settings,
    pudl_table="load_curves_epaipm",
    settings_agg_key="region_aggregations",
):
    """Read IPM hourly demand from PUDL, grow it to the model year, and pivot
    it into a wide table with one column per model region.

    Parameters
    ----------
    pudl_engine
        Database connection passed to ``pandas.read_sql_table``.
    settings : dict
        Needs "model_regions", "target_region_pst_offset", the aggregation
        mapping under ``settings_agg_key``, and the keys used by
        ``add_load_growth``.
    pudl_table : str, optional
        PUDL table with hourly IPM load curves.
    settings_agg_key : str, optional
        Settings key holding the {model region: [IPM regions]} dictionary.

    Returns
    -------
    DataFrame
        Wide hourly load, indexed by 1-based "time_index", one column per
        model region.
    """
    # Reverse the {model region: [IPM regions]} mapping so it can be used
    # with Series.map.
    region_agg_map = reverse_dict_of_lists(settings[settings_agg_key])

    # Keep base model regions plus any IPM region feeding an aggregate; the
    # aggregate names themselves are dropped.
    keep_regions = [
        r for r in settings["model_regions"] + list(region_agg_map)
        if r not in region_agg_map.values()
    ]

    # sqlalchemy can't parameterize table names, so the whole table is read
    # and filtered afterwards.
    logger.info("Loading load curves from PUDL")
    load_curves = pd.read_sql_table(
        pudl_table,
        pudl_engine,
        columns=["region_id_epaipm", "time_index", "load_mw"])
    load_curves = load_curves.loc[
        load_curves.region_id_epaipm.isin(keep_regions)]

    # Increase demand to account for load growth
    load_curves = add_load_growth(load_curves, settings)

    # Copy the IPM region labels, then overwrite any that roll up into an
    # aggregated model region.
    load_curves.loc[:, "region"] = load_curves.loc[:, "region_id_epaipm"]
    agg_rows = load_curves.region_id_epaipm.isin(region_agg_map.keys())
    load_curves.loc[agg_rows, "region"] = load_curves.loc[
        agg_rows, "region_id_epaipm"].map(region_agg_map)

    logger.info("Aggregating load curves in grouped regions")
    load_curves_agg = load_curves.groupby(["region", "time_index"]).sum()

    lc_wide = load_curves_agg.unstack(level=0)
    lc_wide.columns = lc_wide.columns.droplevel()

    # Shift profiles into the target region's timezone, wrapping around the
    # year boundary.
    pst_offset = settings["target_region_pst_offset"]
    lc_wide = shift_wrap_profiles(lc_wide, pst_offset)

    lc_wide.index.name = "time_index"
    lc_wide.index = lc_wide.index + 1

    return lc_wide
Пример #5
0
def add_load_growth(load_curves, settings):
    """Compound regional growth rates onto hourly load through the model year.

    Parameters
    ----------
    load_curves : DataFrame
        Hourly load with "region_id_epaipm" and "load_mw" columns; modified
        in place and returned.
    settings : dict
        Needs "load_region_map", "default_growth_rates", "model_year", and
        "default_load_year". "alt_growth_rate" may override rates by region.

    Returns
    -------
    DataFrame
        The load curves with "load_mw" multiplied by each region's compound
        growth factor.
    """
    load_map = reverse_dict_of_lists(settings["load_region_map"])

    # Default rate for each IPM region's load region, then user overrides.
    growth_rates = {
        ipm_region: settings["default_growth_rates"][load_region]
        for ipm_region, load_region in load_map.items()
    }
    alt_rates = settings["alt_growth_rate"]
    if alt_rates is not None:
        growth_rates.update(alt_rates)

    years = settings["model_year"] - settings["default_load_year"]

    for region in load_curves["region_id_epaipm"].unique():
        factor = (1 + growth_rates[region]) ** years
        load_curves.loc[load_curves["region_id_epaipm"] == region,
                        "load_mw"] *= factor

    return load_curves
Пример #6
0
def add_load_growth(load_curves: pd.DataFrame, settings: dict) -> pd.DataFrame:
    """Scale hourly load by historical AEO demand growth (2012-2018) and by
    projected AEO growth from the regular growth start year to the model year.

    Parameters
    ----------
    load_curves : pd.DataFrame
        Hourly load with "region_id_epaipm" and "load_mw" columns; modified
        in place and returned.
    settings : dict
        Needs "historical_load_region_maps", "future_load_region_map",
        "model_year", and "regular_load_growth_start_year". Optional keys:
        "eia_aeo_year" and "alt_growth_rate".

    Returns
    -------
    pd.DataFrame
        The load curves with "load_mw" multiplied by the combined historical
        and future growth factors.
    """
    keep_regions, region_agg_map = regions_to_keep(settings)
    hist_region_map = reverse_dict_of_lists(
        settings["historical_load_region_maps"])
    future_region_map = reverse_dict_of_lists(
        settings["future_load_region_map"])

    # Observed demand at the start of the historical window (2012, AEO2014).
    hist_demand_start = {}
    for region in keep_regions:
        aeo_2014 = get_aeo_load(
            region=hist_region_map[region],
            aeo_year=2014,
            scenario_series="REF2014").set_index("year")
        hist_demand_start[region] = aeo_2014.loc[2012, "demand"]

    # Observed demand at the end of the historical window (2018, AEO2019).
    hist_demand_end = {}
    for region in keep_regions:
        aeo_2019 = get_aeo_load(
            region=hist_region_map[region],
            aeo_year=2019,
            scenario_series="REF2019").set_index("year")
        hist_demand_end[region] = aeo_2019.loc[2018, "demand"]

    # Projected demand series for each region, indexed by year.
    projections = {}
    for region in keep_regions:
        projections[region] = get_aeo_load(
            region=future_region_map[region],
            aeo_year=settings.get("eia_aeo_year", 2020),
            scenario_series="REF2020",
        ).set_index("year")

    growth_start_year = settings.get("regular_load_growth_start_year", 2019)

    future_growth_factor = {}
    hist_growth_factor = {}
    for region in keep_regions:
        start_demand = projections[region].loc[growth_start_year, "demand"]
        end_demand = projections[region].loc[settings["model_year"], "demand"]
        future_growth_factor[region] = end_demand / start_demand
        hist_growth_factor[region] = (hist_demand_end[region] /
                                      hist_demand_start[region])

    years_growth = settings["model_year"] - settings[
        "regular_load_growth_start_year"]

    # User-supplied rates replace the AEO-derived future growth factor.
    for region, rate in (settings.get("alt_growth_rate") or {}).items():
        future_growth_factor[region] = (1 + rate) ** years_growth

    for region in keep_regions:
        load_curves.loc[load_curves["region_id_epaipm"] == region,
                        "load_mw"] *= (hist_growth_factor[region] *
                                       future_growth_factor[region])

    return load_curves
        "NY_Z_B",
        "NY_Z_C&E",
        "NY_Z_D",
        "NY_Z_F",
        "NY_Z_G-I",
    ],
    "NYCW": ["NY_Z_J", "NY_Z_K"],
    "ISNE": ["NENG_ME", "NENGREST", "NENG_CT"],
    "RMRG": ["WECC_CO"],
    "BASN": ["WECC_ID", "WECC_WY", "WECC_UT", "WECC_NNV"],
    "NWPP": ["WECC_PNW", "WECC_MT"],
    "CANO": ["WEC_CALN", "WEC_BANC"],
    "CASO": ["WECC_IID", "WECC_SCE", "WEC_LADW", "WEC_SDGE"],
    "SRSG": ["WECC_AZ", "WECC_NM", "WECC_SNV"],
}
rev_cost_mult_region_map = reverse_dict_of_lists(cost_multiplier_region_map)

tx_capex_region_map = {
    "wecc": [
        "WECC_AZ",
        "WECC_CO",
        "WECC_ID",
        "WECC_MT",
        "WECC_NM",
        "WECC_NNV",
        "WECC_PNW",
        "WECC_SNV",
        "WECC_UT",
        "WECC_WY",
    ],
    "ca": [
Пример #8
0
def atb_new_generators(atb_costs, atb_hr, settings):
    """Add rows for new generators in each region.

    Parameters
    ----------
    atb_costs : DataFrame
        All cost parameters from the SQL table for new generators. Should include:
        ['technology', 'cost_case', 'financial_case', 'basis_year', 'tech_detail',
        'capex', 'capex_mwh', 'o_m_fixed_mw', 'o_m_fixed_mwh', 'o_m_variable_mwh',
        'waccnomtech']
    atb_hr : DataFrame
        The technology, tech_detail, and heat_rate of new generators from ATB.
    settings : dict
        User-defined parameters from a settings file

    Returns
    -------
    DataFrame
        New generating resources in every region. Contains the columns:
        ['technology', 'basis_year', 'Fixed_OM_cost_per_MWyr',
        'Fixed_OM_cost_per_MWhyr', 'Var_OM_cost_per_MWh', 'capex', 'capex_mwh',
        'Inv_cost_per_MWyr', 'Inv_cost_per_MWhyr', 'Heat_rate_MMBTU_per_MWh',
        'Cap_size', 'region']
    """
    logger.info("Creating new resources for each region.")
    new_gen_types = settings["atb_new_gen"]
    model_year = settings["model_year"]
    # Use the planning period when a first planning year is given; otherwise
    # every year through the model year.
    try:
        first_planning_year = settings["model_first_planning_year"]
        model_year_range = range(first_planning_year, model_year + 1)
    except KeyError:
        model_year_range = list(range(model_year + 1))

    regions = settings["model_regions"]

    atb_costs_hr = atb_costs.merge(
        atb_hr, on=["technology", "tech_detail", "basis_year"], how="left")

    new_gen_df = pd.concat(
        [
            single_generator_row(atb_costs_hr, new_gen, model_year_range)
            for new_gen in new_gen_types
        ],
        ignore_index=True,
    )

    # Battery WACC is either a literal value (float) or borrowed from the
    # UtilityPV row (str).
    if isinstance(settings["atb_battery_wacc"], float):
        new_gen_df.loc[new_gen_df["technology"] == "Battery",
                       "waccnomtech"] = settings["atb_battery_wacc"]
    elif isinstance(settings["atb_battery_wacc"], str):
        solar_wacc = new_gen_df.loc[
            new_gen_df["technology"].str.contains("UtilityPV"),
            "waccnomtech"].values[0]

        new_gen_df.loc[new_gen_df["technology"] == "Battery",
                       "waccnomtech"] = solar_wacc

    # Add user-defined technologies
    # This should probably be separate from ATB techs, and the regional cost multipliers
    # should be its own function.
    if settings["additional_technologies_fn"] is not None:
        if isinstance(settings["additional_new_gen"], list):
            user_tech = load_user_defined_techs(settings)
            new_gen_df = pd.concat([new_gen_df, user_tech],
                                   ignore_index=True,
                                   sort=False)
        else:
            logger.warning(
                "A filename for additional technologies was included but no technologies"
                " were specified in the settings file.")

    if settings["modified_atb_new_gen"] is not None:
        modified_gens = add_modified_atb_generators(settings, atb_costs_hr,
                                                    model_year_range)
        new_gen_df = pd.concat([new_gen_df, modified_gens], ignore_index=True)

    new_gen_df = new_gen_df.rename(
        columns={
            "heat_rate": "Heat_rate_MMBTU_per_MWh",
            "o_m_fixed_mw": "Fixed_OM_cost_per_MWyr",
            "o_m_fixed_mwh": "Fixed_OM_cost_per_MWhyr",
            "o_m_variable_mwh": "Var_OM_cost_per_MWh",
        })

    # Adjust values for CT/CC generators to match advanced techs in NEMS rather than
    # ATB average of advanced and conventional.
    # This is now generalized for changes to ATB values for any technology type.
    for tech, _tech_modifiers in settings["atb_modifiers"].items():
        # Deep copy so the pops below don't mutate the user's settings object.
        tech_modifiers = copy.deepcopy(_tech_modifiers)
        assert isinstance(tech_modifiers, dict), (
            "The settings parameter 'atb_modifiers' must be a nested dictionary.\n"
            "Each top-level key is a short name of the technology, with a nested"
            " dictionary of items below it.")
        assert (
            "technology" in tech_modifiers.keys()
        ), "Each nested dictionary in atb_modifiers must have a 'technology' key."
        assert (
            "tech_detail" in tech_modifiers.keys()
        ), "Each nested dictionary in atb_modifiers must have a 'tech_detail' key."

        technology = tech_modifiers.pop("technology")
        tech_detail = tech_modifiers.pop("tech_detail")

        allowed_operators = ["add", "mul", "truediv", "sub"]

        for key, op_list in tech_modifiers.items():
            # BUG FIX: the original f-string referenced an undefined name
            # 'parameter'; use the modifier key instead.
            assert len(op_list) == 2, (
                "Two values, an operator and a numeric value, are needed in the parameter\n"
                f"'{key}' for technology '{tech}' in 'atb_modifiers'.")
            op, op_value = op_list

            assert op in allowed_operators, (
                f"The key {key} for technology {tech} needs a valid operator from the list\n"
                f"{allowed_operators}\n"
                "in the format [<operator>, <value>] to modify the properties of an existing generator.\n"
            )

            # BUG FIX: the original built f = operator.attrgetter(op) and then
            # called f(operator)(...); getattr is direct and equivalent.
            f = getattr(operator, op)
            mask = (new_gen_df.technology == technology) & (
                new_gen_df.tech_detail == tech_detail)
            new_gen_df.loc[mask, key] = f(new_gen_df.loc[mask, key], op_value)

    new_gen_df["technology"] = (new_gen_df["technology"] + "_" +
                                new_gen_df["tech_detail"].astype(str) + "_" +
                                new_gen_df["cost_case"])

    # NOTE(review): cap_recovery_years is assigned and alt values applied
    # twice below, with case-insensitive matching here and case-sensitive
    # matching in the second pass. Kept as-is to preserve behavior, but this
    # duplication should be consolidated.
    new_gen_df["cap_recovery_years"] = settings["atb_cap_recovery_years"]

    if settings["alt_atb_cap_recovery_years"]:
        for tech, years in settings["alt_atb_cap_recovery_years"].items():
            new_gen_df.loc[
                new_gen_df.technology.str.lower().str.contains(tech.lower()),
                "cap_recovery_years", ] = years

    new_gen_df["Inv_cost_per_MWyr"] = investment_cost_calculator(
        capex=new_gen_df["capex"],
        wacc=new_gen_df["waccnomtech"],
        cap_rec_years=new_gen_df["cap_recovery_years"],
    )

    new_gen_df["Inv_cost_per_MWhyr"] = investment_cost_calculator(
        capex=new_gen_df["capex_mwh"],
        wacc=new_gen_df["waccnomtech"],
        cap_rec_years=new_gen_df["cap_recovery_years"],
    )

    new_gen_df["cap_recovery_years"] = settings["atb_cap_recovery_years"]

    # Some technologies might have a different capital recovery period
    if settings["alt_atb_cap_recovery_years"] is not None:
        for tech, years in settings["alt_atb_cap_recovery_years"].items():
            tech_mask = new_gen_df["technology"].str.contains(tech)

            new_gen_df.loc[tech_mask, "cap_recovery_years"] = years

            new_gen_df.loc[tech_mask,
                           "Inv_cost_per_MWyr"] = investment_cost_calculator(
                               capex=new_gen_df.loc[tech_mask, "capex"],
                               wacc=new_gen_df.loc[tech_mask, "waccnomtech"],
                               cap_rec_years=years,
                           )

            new_gen_df.loc[tech_mask,
                           "Inv_cost_per_MWhyr"] = investment_cost_calculator(
                               capex=new_gen_df.loc[tech_mask, "capex_mwh"],
                               wacc=new_gen_df.loc[tech_mask, "waccnomtech"],
                               cap_rec_years=years,
                           )

    keep_cols = [
        "technology",
        "basis_year",
        "Fixed_OM_cost_per_MWyr",
        "Fixed_OM_cost_per_MWhyr",
        "Var_OM_cost_per_MWh",
        "capex",
        "capex_mwh",
        "Inv_cost_per_MWyr",
        "Inv_cost_per_MWhyr",
        "Heat_rate_MMBTU_per_MWh",
        "Cap_size",
        "cap_recovery_years",
        "waccnomtech",
        "regional_cost_multiplier",
    ]

    regional_cost_multipliers = pd.read_csv(
        DATA_PATHS["cost_multipliers"] / "EIA regional cost multipliers.csv",
        index_col=0,
    )
    rev_mult_region_map = reverse_dict_of_lists(
        settings["cost_multiplier_region_map"])
    rev_mult_tech_map = reverse_dict_of_lists(
        settings["cost_multiplier_technology_map"])
    df_list = []
    for region in regions:
        _df = new_gen_df.loc[:, keep_cols].copy()
        _df["region"] = region
        # Apply EIA regional capex multipliers, then any extra wind/solar rows.
        _df = regional_capex_multiplier(
            _df,
            region,
            rev_mult_region_map,
            rev_mult_tech_map,
            regional_cost_multipliers,
        )
        _df = add_extra_wind_solar_rows(_df, region, settings)

        # Drop technologies the user marked as unavailable in this region.
        if (settings["new_gen_not_available"] is not None
                and region in settings["new_gen_not_available"].keys()):
            techs = settings["new_gen_not_available"][region]
            for tech in techs:
                _df = _df.loc[~_df["technology"].str.contains(tech), :]

        df_list.append(_df)

    results = pd.concat(df_list, ignore_index=True)

    int_cols = [
        "Fixed_OM_cost_per_MWyr",
        "Fixed_OM_cost_per_MWhyr",
        "Inv_cost_per_MWyr",
        "Inv_cost_per_MWhyr",
    ]
    # Cost columns are rounded: integer dollars per MW-yr, one decimal for
    # variable O&M.
    results = results.fillna(0)
    results.loc[:, int_cols] = results.loc[:, int_cols].astype(int)
    results.loc[:, "Var_OM_cost_per_MWh"] = (
        results.loc[:, "Var_OM_cost_per_MWh"].astype(float).round(1))

    return results
Пример #9
0
def agg_transmission_constraints(
    pudl_engine,
    settings,
    pudl_table="transmission_single_epaipm",
    settings_agg_key="region_aggregations",
):
    """Aggregate IPM transmission constraints into one line per model-region pair.

    Parameters
    ----------
    pudl_engine
        Database connection passed to ``pandas.read_sql_table``.
    settings : dict
        Must include "model_regions" and the aggregation mapping stored under
        ``settings_agg_key``.
    pudl_table : str, optional
        Name of the PUDL table holding single IPM transmission constraints.
    settings_agg_key : str, optional
        Settings key with the {model region: [IPM regions]} dictionary.

    Returns
    -------
    DataFrame
        Indexed by "Transmission Path Name"; columns are "Network_lines",
        one +1/-1 direction indicator per zone (renamed z1..zN),
        "Line_Max_Flow_MW", and "Line_Min_Flow_MW".
    """

    zones = settings["model_regions"]
    # Positional labels for zones: first zone -> "z1", second -> "z2", ...
    zone_num_map = {
        zone: f"z{number + 1}" for zone, number in zip(zones, range(len(zones)))
    }

    # Unordered zone pairs define one line each; the reversed pairs are used
    # below to pull the opposite-direction capacity (negative min flow).
    combos = list(itertools.combinations(zones, 2))
    reverse_combos = [(combo[-1], combo[0]) for combo in combos]

    logger.info("Loading transmission constraints from PUDL")
    transmission_constraints_table = pd.read_sql_table(pudl_table, con=pudl_engine)
    # Settings has a dictionary of lists for regional aggregations. Need
    # to reverse this to use in a map method.
    region_agg_map = reverse_dict_of_lists(settings[settings_agg_key])

    # IPM regions to keep. Regions not in this list will be dropped from the
    # dataframe
    keep_regions = [
        x
        for x in settings["model_regions"] + list(region_agg_map)
        if x not in region_agg_map.values()
    ]

    # Create new column "model_region_from"  and "model_region_to" with labels that
    # we're using for aggregated regions
    transmission_constraints_table = transmission_constraints_table.loc[
        (transmission_constraints_table.region_from.isin(keep_regions))
        & (transmission_constraints_table.region_to.isin(keep_regions)),
        :,
    ].drop(columns="id")

    logger.info("Map and aggregate region names for transmission constraints")
    for col in ["region_from", "region_to"]:
        model_col = "model_" + col

        transmission_constraints_table = map_agg_region_names(
            df=transmission_constraints_table,
            region_agg_map=region_agg_map,
            original_col_name=col,
            new_col_name=model_col,
        )

    # Drop the firm TTC and tariff columns; capacities of pairs that share a
    # model-region pair after mapping are summed together.
    transmission_constraints_table.drop(
        columns=["firm_ttc_mw", "tariff_mills_kwh"], inplace=True
    )
    transmission_constraints_table = transmission_constraints_table.groupby(
        ["model_region_from", "model_region_to"]
    ).sum()

    # Build the final output dataframe
    logger.info(
        "Build a new transmission constraints dataframe with a single line between "
        "regions"
    )
    # reindex(combos).dropna() keeps only pairs that actually have a
    # constraint and fixes the row order used for alignment below.
    tc_joined = pd.DataFrame(
        columns=["Network_lines"] + zones + ["Line_Max_Flow_MW", "Line_Min_Flow_MW"],
        index=transmission_constraints_table.reindex(combos).dropna().index,
        data=0,
    )
    tc_joined["Network_lines"] = range(1, len(tc_joined) + 1)
    tc_joined["Line_Max_Flow_MW"] = transmission_constraints_table.reindex(
        combos
    ).dropna()

    # Reverse-direction capacity becomes a negative minimum flow; the index
    # is overwritten so rows align positionally with tc_joined.
    reverse_tc = transmission_constraints_table.reindex(reverse_combos).dropna() * -1
    reverse_tc.index = tc_joined.index
    tc_joined["Line_Min_Flow_MW"] = reverse_tc

    # Mark the origin zone of each line with +1 and the destination with -1.
    for idx, row in tc_joined.iterrows():
        tc_joined.loc[idx, idx[0]] = 1
        tc_joined.loc[idx, idx[-1]] = -1

    tc_joined.rename(columns=zone_num_map, inplace=True)
    tc_joined = tc_joined.reset_index()
    tc_joined["Transmission Path Name"] = (
        tc_joined["model_region_from"] + "_to_" + tc_joined["model_region_to"]
    )
    tc_joined = tc_joined.set_index("Transmission Path Name")
    tc_joined.drop(columns=["model_region_from", "model_region_to"], inplace=True)

    return tc_joined
Пример #10
0
def atb_new_generators(atb_costs, atb_hr, settings):
    """Add rows for new generators in each region.

    Parameters
    ----------
    atb_costs : DataFrame
        Cost parameters for new generators (capex, O&M, WACC) by technology,
        tech_detail, cost_case, and basis_year.
    atb_hr : DataFrame
        The technology, tech_detail, and heat_rate of new generators from ATB.
    settings : dict
        User-defined parameters from a settings file.

    Returns
    -------
    DataFrame
        New generating resources in every region, with investment and O&M
        cost columns, heat rate, and capacity size.
    """
    logger.info("Creating new resources for each region.")
    new_gen_types = settings["atb_new_gen"]
    model_year = settings["model_year"]
    # Use the planning period when a first planning year is given; otherwise
    # every year through the model year.
    try:
        first_planning_year = settings["model_first_planning_year"]
        model_year_range = range(first_planning_year, model_year + 1)
    except KeyError:
        model_year_range = list(range(model_year + 1))

    regions = settings["model_regions"]

    new_gen_df = pd.concat(
        [
            single_generator_row(atb_costs, new_gen, model_year_range)
            for new_gen in new_gen_types
        ],
        ignore_index=True,
    )

    # Battery WACC is either a literal value (float) or borrowed from the
    # UtilityPV row (str).
    if isinstance(settings["atb_battery_wacc"], float):
        new_gen_df.loc[new_gen_df["technology"] == "Battery",
                       "waccnomtech"] = settings["atb_battery_wacc"]
    elif isinstance(settings["atb_battery_wacc"], str):
        solar_wacc = new_gen_df.loc[
            new_gen_df["technology"].str.contains("UtilityPV"),
            "waccnomtech"].values[0]

        new_gen_df.loc[new_gen_df["technology"] == "Battery",
                       "waccnomtech"] = solar_wacc

    # Add user-defined technologies
    # This should probably be separate from ATB techs, and the regional cost multipliers
    # should be its own function.
    if settings["additional_technologies_fn"] is not None:
        user_costs, user_hr = load_user_defined_techs(settings)
        new_gen_df = pd.concat([new_gen_df, user_costs],
                               ignore_index=True,
                               sort=False)
        atb_hr = pd.concat([atb_hr, user_hr], ignore_index=True, sort=False)

    new_gen_df = new_gen_df.merge(
        atb_hr, on=["technology", "tech_detail", "basis_year"], how="left")

    if settings["modified_atb_new_gen"] is not None:
        modified_gens = add_modified_atb_generators(settings, atb_costs,
                                                    atb_hr, model_year_range)
        new_gen_df = pd.concat([new_gen_df, modified_gens], ignore_index=True)

    new_gen_df = new_gen_df.rename(
        columns={
            "heat_rate": "Heat_rate_MMBTU_per_MWh",
            "o_m_fixed_mw": "Fixed_OM_cost_per_MWyr",
            "o_m_fixed_mwh": "Fixed_OM_cost_per_MWhyr",
            "o_m_variable_mwh": "Var_OM_cost_per_MWh",
        })

    # Adjust values for CT/CC generators to match advanced techs in NEMS rather than
    # ATB average of advanced and conventional.
    # This is now generalized for changes to ATB values for any technology type.
    for tech, _tech_multipliers in settings["atb_multipliers"].items():
        # BUG FIX: deep copy before popping keys so the user's settings dict
        # isn't mutated as a side effect.
        tech_multipliers = copy.deepcopy(_tech_multipliers)
        assert isinstance(tech_multipliers, dict), (
            "The settings parameter 'atb_multipliers' must be a nested dictionary.\n"
            "Each top-level key is a short name of the technology, with a nested"
            " dictionary of items below it.")
        assert (
            "technology" in tech_multipliers.keys()
        ), "Each nested dictionary in atb_multipliers must have a 'technology' key."
        assert (
            "tech_detail" in tech_multipliers.keys()
        ), "Each nested dictionary in atb_multipliers must have a 'tech_detail' key."

        technology = tech_multipliers.pop("technology")
        tech_detail = tech_multipliers.pop("tech_detail")

        for key, multiplier in tech_multipliers.items():

            new_gen_df.loc[(new_gen_df.technology == technology)
                           & (new_gen_df.tech_detail == tech_detail),
                           key, ] *= multiplier

    new_gen_df["technology"] = (new_gen_df["technology"] + "_" +
                                new_gen_df["tech_detail"].astype(str) + "_" +
                                new_gen_df["cost_case"])

    new_gen_df["Inv_cost_per_MWyr"] = investment_cost_calculator(
        capex=new_gen_df["capex"],
        wacc=new_gen_df["waccnomtech"],
        cap_rec_years=settings["atb_cap_recovery_years"],
    )

    new_gen_df["Inv_cost_per_MWhyr"] = investment_cost_calculator(
        capex=new_gen_df["capex_mwh"],
        wacc=new_gen_df["waccnomtech"],
        cap_rec_years=settings["atb_cap_recovery_years"],
    )

    # Some technologies might have a different capital recovery period
    if settings["alt_atb_cap_recovery_years"] is not None:
        for tech, years in settings["alt_atb_cap_recovery_years"].items():
            tech_mask = new_gen_df["technology"].str.contains(tech)

            new_gen_df.loc[tech_mask,
                           "Inv_cost_per_MWyr"] = investment_cost_calculator(
                               capex=new_gen_df.loc[tech_mask, "capex"],
                               wacc=new_gen_df.loc[tech_mask, "waccnomtech"],
                               cap_rec_years=years,
                           )

            new_gen_df.loc[tech_mask,
                           "Inv_cost_per_MWhyr"] = investment_cost_calculator(
                               capex=new_gen_df.loc[tech_mask, "capex_mwh"],
                               wacc=new_gen_df.loc[tech_mask, "waccnomtech"],
                               cap_rec_years=years,
                           )

    keep_cols = [
        "technology",
        "basis_year",
        "Fixed_OM_cost_per_MWyr",
        "Fixed_OM_cost_per_MWhyr",
        "Var_OM_cost_per_MWh",
        "capex",
        "capex_mwh",
        "Inv_cost_per_MWyr",
        "Inv_cost_per_MWhyr",
        "Heat_rate_MMBTU_per_MWh",
        "Cap_size",
    ]

    regional_cost_multipliers = pd.read_csv(
        DATA_PATHS["cost_multipliers"] / "EIA regional cost multipliers.csv",
        index_col=0,
    )
    rev_mult_region_map = reverse_dict_of_lists(
        settings["cost_multiplier_region_map"])
    rev_mult_tech_map = reverse_dict_of_lists(
        settings["cost_multiplier_technology_map"])
    df_list = []
    for region in regions:
        _df = new_gen_df.loc[:, keep_cols].copy()
        _df["region"] = region
        # Apply EIA regional capex multipliers, then any extra wind/solar rows.
        _df = regional_capex_multiplier(
            _df,
            region,
            rev_mult_region_map,
            rev_mult_tech_map,
            regional_cost_multipliers,
        )
        _df = add_extra_wind_solar_rows(_df, region, settings)

        # ROBUSTNESS FIX: guard against "new_gen_not_available" being None
        # (matching the other variant of this function in this file).
        if (settings["new_gen_not_available"] is not None
                and region in settings["new_gen_not_available"].keys()):
            techs = settings["new_gen_not_available"][region]
            for tech in techs:
                _df = _df.loc[~_df["technology"].str.contains(tech), :]

        df_list.append(_df)

    results = pd.concat(df_list, ignore_index=True)

    int_cols = [
        "Fixed_OM_cost_per_MWyr",
        "Fixed_OM_cost_per_MWhyr",
        "Inv_cost_per_MWyr",
        "Inv_cost_per_MWhyr",
    ]
    # Cost columns are rounded: integer dollars per MW-yr, one decimal for
    # variable O&M.
    results = results.fillna(0)
    results.loc[:, int_cols] = results.loc[:, int_cols].astype(int)
    results.loc[:, "Var_OM_cost_per_MWh"] = (
        results.loc[:, "Var_OM_cost_per_MWh"].astype(float).round(1))

    return results
Пример #11
0
def atb_new_generators(results, atb_costs, atb_hr, settings):
    """Append new-build generator rows for every model region to ``results``.

    Parameters
    ----------
    results : DataFrame
        Compiled results of clustered power plants with weighted average heat
        rates; new-build rows are appended to it.
    atb_costs : DataFrame
        ATB cost parameters (capex, fixed/variable O&M, WACC) for new
        generators.
    atb_hr : DataFrame
        Heat rates by technology, tech_detail, and basis_year.
    settings : dict
        User-defined parameters from a settings file.

    Returns
    -------
    DataFrame
        The input ``results`` with one set of new-build rows per model region
        appended.
    """

    new_gen_types = settings["atb_new_gen"]
    model_year = settings["model_year"]
    regions = settings["model_regions"]

    rows = [
        single_generator_row(atb_costs, new_gen, model_year)
        for new_gen in new_gen_types
    ]
    new_gen_df = pd.concat(rows, ignore_index=True)

    # Annualized investment cost from capex, WACC, and recovery period.
    new_gen_df["Inv_cost_per_MWyr"] = investment_cost_calculator(
        capex=new_gen_df["capex"],
        wacc=new_gen_df["waccnomtech"],
        cap_rec_years=settings["atb_cap_recovery_years"],
    )

    new_gen_df = new_gen_df.merge(
        atb_hr, on=["technology", "tech_detail", "basis_year"], how="left")

    rename_map = {
        "heat_rate": "Heat_rate_MMBTU_per_MWh",
        "o_m_fixed_mw": "Fixed_OM_cost_per_MWyr",
        "o_m_variable_mwh": "Var_OM_cost_per_MWh",
    }
    new_gen_df = new_gen_df.rename(columns=rename_map)

    # Compose a unique label: <technology>_<tech detail>_<cost case>.
    new_gen_df["technology"] = (new_gen_df["technology"] + "_" +
                                new_gen_df["tech_detail"] + "_" +
                                new_gen_df["cost_case"])

    keep_cols = [
        "technology",
        "basis_year",
        "Fixed_OM_cost_per_MWyr",
        "Var_OM_cost_per_MWh",
        "Inv_cost_per_MWyr",
        "Heat_rate_MMBTU_per_MWh",
        "Cap_size",
    ]

    regional_cost_multipliers = pd.read_csv(
        DATA_PATHS["cost_multipliers"] / "EIA regional cost multipliers.csv",
        index_col=0,
    )
    region_map = reverse_dict_of_lists(
        settings["cost_multiplier_region_map"])
    tech_map = reverse_dict_of_lists(
        settings["cost_multiplier_technology_map"])

    # One copy of the new-build rows per region, with regional capex
    # multipliers applied.
    region_frames = []
    for region in regions:
        region_df = new_gen_df.loc[:, keep_cols].copy()
        region_df["region"] = region
        region_df = regional_capex_multiplier(
            region_df,
            region,
            region_map,
            tech_map,
            regional_cost_multipliers,
        )
        region_frames.append(region_df)

    new_rows = pd.concat(region_frames, ignore_index=True)
    return pd.concat([results, new_rows], ignore_index=True)