Example no. 1
def make_genx_settings_file(pudl_engine, settings, calculated_ces=None):
    """Make a copy of the GenX settings file for a specific case.

    This function tries to make some intelligent choices about parameter values, such
    as the RPS/CES type, and can also read values from a file.

    There should be a base-level GenX settings file with parameters like the solver and
    solver-specific settings that stay constant across all cases.

    Parameters
    ----------
    pudl_engine : sqlalchemy.Engine
        A sqlalchemy connection for use by pandas to access IPM load profiles. These
        load profiles are needed when DG is calculated as a fraction of load.
    settings : dict
        User-defined parameters from a settings file. Should have the keys
        `model_year`, `case_id`, `case_name`, `input_folder` (a Path object pointing
        to user-supplied data), `emission_policies_fn`, `distributed_gen_profiles_fn`
        (files loaded in other functions), and `genx_settings_fn`.

    Returns
    -------
    dict
        Dictionary of settings for a GenX run
    """

    model_year = settings["model_year"]
    case_id = settings["case_id"]
    case_name = settings["case_name"]

    genx_settings = load_settings(settings["genx_settings_fn"])
    policies = load_policy_scenarios(settings)
    year_case_policy = policies.loc[(case_id, model_year), :]

    # Bug where multiple regions for a case will return this as a df, even if the policy
    # for this case applies to all regions (code below expects a Series)
    ycp_shape = year_case_policy.shape
    if ycp_shape[0] == 1 and len(ycp_shape) > 1:
        year_case_policy = year_case_policy.squeeze()  # convert to series

    if settings.get("distributed_gen_profiles_fn"):
        dg_generation = make_distributed_gen_profiles(pudl_engine, settings)
        total_dg_gen = dg_generation.sum().sum()
    else:
        total_dg_gen = 0

    if isinstance(year_case_policy, pd.DataFrame):
        year_case_policy = year_case_policy.sum()

    # If a value isn't supplied to the function, use the value from the policy file
    if calculated_ces is None:
        CES = year_case_policy["CES"]
    else:
        CES = calculated_ces
    RPS = year_case_policy["RPS"]

    # THIS WILL NEED TO BE MORE FLEXIBLE FOR OTHER SCENARIOS
    if float(year_case_policy["CO_2_Max_Mtons"]) >= 0:
        genx_settings["CO2Cap"] = 2
    else:
        genx_settings["CO2Cap"] = 0

    if float(year_case_policy["RPS"]) > 0:
        # print(total_dg_gen)
        # print(year_case_policy["RPS"])
        if policies.loc[(case_id, model_year), "region"].all() == "all":
            genx_settings["RPS"] = 3
            genx_settings["RPS_Adjustment"] = float((1 - RPS) * total_dg_gen)
        else:
            genx_settings["RPS"] = 2
            genx_settings["RPS_Adjustment"] = 0
    else:
        genx_settings["RPS"] = 0
        genx_settings["RPS_Adjustment"] = 0

    if float(year_case_policy["CES"]) > 0:
        if policies.loc[(case_id, model_year), "region"].all() == "all":
            genx_settings["CES"] = 3

            # This is a little confusing, but a partial CES needs no adjustment;
            # otherwise adjust for distributed generation.
            if settings.get("partial_ces"):
                genx_settings["CES_Adjustment"] = 0
            else:
                genx_settings["CES_Adjustment"] = float((1 - CES) * total_dg_gen)
        else:
            genx_settings["CES"] = 2
            genx_settings["CES_Adjustment"] = 0
    else:
        genx_settings["CES"] = 0
        genx_settings["CES_Adjustment"] = 0

    # Don't wrap when time domain isn't reduced
    if not settings.get("reduce_time_domain"):
        genx_settings["OperationWrapping"] = 0

    genx_settings["case_id"] = case_id
    genx_settings["case_name"] = case_name
    genx_settings["year"] = str(model_year)

    # This is a new setting, will need to have a way to change.
    genx_settings["CapacityReserveMargin"] = 0
    genx_settings["LDS"] = 0

    # Load user-defined values for the GenX settings file. This overrides the
    # complicated logic above.
    if settings.get("case_genx_settings_fn"):
        user_genx_settings = load_user_genx_settings(settings)
        user_case_settings = user_genx_settings.loc[(case_id, model_year), :]
        for key, value in user_case_settings.items():
            if not pd.isna(value):
                genx_settings[key] = value

    return genx_settings
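
# Usage sketch (illustrative only, not part of the original module). The import
# paths and the settings file name below are assumptions; the required settings
# keys are the ones listed in the docstring above.
if __name__ == "__main__":
    from powergenome.util import init_pudl_connection, load_settings

    # Hypothetical case settings file defining model_year, case_id, case_name,
    # input_folder, emission_policies_fn, distributed_gen_profiles_fn, and
    # genx_settings_fn.
    case_settings = load_settings(path="example_case_settings.yml")
    pudl_engine, pudl_out = init_pudl_connection(freq="YS")
    genx_case_settings = make_genx_settings_file(pudl_engine, case_settings)
    print(genx_case_settings)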
Example no. 2
def main():

    args = parse_command_line(sys.argv)

    out_folder = DATA_PATHS["results"] / args.results_folder
    DATA_PATHS["results"].mkdir(exist_ok=True)
    out_folder.mkdir(exist_ok=True)

    # Create a logger to output any messages we might have...
    logger = logging.getLogger(powergenome.__name__)
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    formatter = logging.Formatter(
        # More extensive test-like formatter...
        "%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s",
        # This is the datetime format string.
        "%Y-%m-%d %H:%M:%S",
    )
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    filehandler = logging.FileHandler(out_folder / "log.txt")
    filehandler.setFormatter(formatter)
    logger.addHandler(filehandler)

    git_hash = get_git_hash()
    logger.info(f"Current git hash is {git_hash}")

    logger.info("Reading settings file")
    settings = load_settings(path=args.settings_file)

    # Copy the settings file to results folder
    shutil.copy(args.settings_file, out_folder)

    logger.info("Initiating PUDL connections")
    pudl_engine, pudl_out = init_pudl_connection(freq="YS")

    # Make sure everything in model_regions is either an aggregate region
    # or an IPM region. Will need to change this once we start using non-IPM
    # regions.
    ipm_regions = pd.read_sql_table("regions_entity_epaipm", pudl_engine)[
        "region_id_epaipm"
    ]
    all_valid_regions = ipm_regions.tolist() + list(
        settings.get("region_aggregations", {})
    )
    good_regions = [region in all_valid_regions for region in settings["model_regions"]]

    # assert all(good_regions), logger.warning(
    #     "One or more model regions is not valid. Check to make sure all"
    #     "regions are either in IPM or region_aggregations in the settings YAML file."
    # )
    if not all(good_regions):
        logger.warning(
            "One or more model regions is not valid. Check to make sure all regions "
            "are either in IPM or region_aggregations in the settings YAML file."
        )

    # Sort zones in the settings to make sure they are correctly sorted everywhere.
    settings["model_regions"] = sorted(settings["model_regions"])
    zones = settings["model_regions"]
    logger.info(f"Sorted zones are {', '.join(zones)}")
    zone_num_map = {zone: str(number + 1) for number, zone in enumerate(zones)}

    if args.gens:
        gc = GeneratorClusters(
            pudl_engine=pudl_engine,
            pudl_out=pudl_out,
            settings=settings,
            current_gens=args.current_gens,
            sort_gens=args.sort_gens,
        )
        gen_clusters = gc.create_all_generators()
        gen_clusters = remove_fuel_scenario_name(gen_clusters, settings)
        gen_clusters["zone"] = gen_clusters["region"].map(zone_num_map)

    if args.load:
        load = make_final_load_curves(pudl_engine=pudl_engine, settings=settings)
        load.columns = "Load_MW_z" + load.columns.map(zone_num_map)

    if args.transmission:
        if args.gens is False:
            model_regions_gdf = load_ipm_shapefile(settings)
        else:
            model_regions_gdf = gc.model_regions_gdf
        transmission = agg_transmission_constraints(
            pudl_engine=pudl_engine, settings=settings
        )
        transmission = transmission.pipe(
            transmission_line_distance,
            ipm_shapefile=model_regions_gdf,
            settings=settings,
            units="mile",
        )

    if args.fuel and args.gens:
        fuels = fuel_cost_table(
            fuel_costs=gc.fuel_prices, generators=gc.all_resources, settings=settings
        )
        fuels["fuel_indices"] = range(1, len(fuels) + 1)
        fuels = remove_fuel_scenario_name(fuels, settings)

    logger.info(f"Write GenX input files to {args.results_folder}")
    if args.gens:
        gen_clusters.to_csv(
            out_folder / f"generator_clusters_{args.results_folder}.csv",
            index=False
            # float_format="%.3f",
        )
        # if args.all_units is True:
        # gc.all_units.to_csv(out_folder / f"all_units_{args.results_folder}.csv")

    if args.load:
        load.astype(int).to_csv(out_folder / f"load_curves_{args.results_folder}.csv")

    if args.transmission:
        transmission.to_csv(
            out_folder / f"transmission_constraints_{args.results_folder}.csv",
            float_format="%.1f",
        )

    if args.fuel and args.gens:
        fuels.to_csv(out_folder / f"Fuels_data_{args.results_folder}.csv", index=False)
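
# Sketch of the command-line interface that parse_command_line is assumed to provide.
# Only the attribute names (settings_file, results_folder, gens, load, transmission,
# fuel, current_gens, sort_gens) come from the code above; the flag spellings and
# defaults are assumptions and may differ from the real PowerGenome CLI.
def _parse_command_line_sketch(argv):
    import argparse

    parser = argparse.ArgumentParser(description="Create GenX input files.")
    parser.add_argument("settings_file", type=str, help="YAML settings file")
    parser.add_argument("results_folder", type=str, help="Name of the output folder")
    parser.add_argument("--gens", action="store_true", help="Build generator clusters")
    parser.add_argument("--load", action="store_true", help="Build load curves")
    parser.add_argument("--transmission", action="store_true",
                        help="Build transmission constraints")
    parser.add_argument("--fuel", action="store_true", help="Build the fuel cost table")
    parser.add_argument("--current-gens", dest="current_gens", action="store_true",
                        help="Passed to GeneratorClusters as current_gens")
    parser.add_argument("--sort-gens", dest="sort_gens", action="store_true",
                        help="Passed to GeneratorClusters as sort_gens")
    return parser.parse_args(argv[1:])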
Example no. 3
def main():

    args = parse_command_line(sys.argv)
    cwd = Path.cwd()

    out_folder = cwd / args.results_folder
    out_folder.mkdir(exist_ok=True)

    # Create a logger to output any messages we might have...
    logger = logging.getLogger(powergenome.__name__)
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    formatter = logging.Formatter(
        # More extensive test-like formatter...
        "%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s",
        # This is the datetime format string.
        "%Y-%m-%d %H:%M:%S",
    )
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    filehandler = logging.FileHandler(out_folder / "log.txt")
    filehandler.setFormatter(formatter)
    logger.addHandler(filehandler)

    logger.info("Reading settings file")
    settings = load_settings(path=args.settings_file)

    # Copy the settings file to results folder
    shutil.copy(args.settings_file, out_folder)

    logger.info("Initiating PUDL connections")
    pudl_engine, pudl_out = init_pudl_connection(freq="YS")
    check_settings(settings, pudl_engine)

    # Make sure everything in model_regions is either an aggregate region
    # or an IPM region. Will need to change this once we start using non-IPM
    # regions.
    ipm_regions = pd.read_sql_table("regions_entity_epaipm",
                                    pudl_engine)["region_id_epaipm"]
    all_valid_regions = ipm_regions.tolist() + list(
        settings.get("region_aggregations", {}))
    good_regions = [
        region in all_valid_regions for region in settings["model_regions"]
    ]

    if not all(good_regions):
        logger.warning(
            "One or more model regions is not valid. Check to make sure all regions "
            "are either in IPM or region_aggregations in the settings YAML file."
        )

    # Sort zones in the settings to make sure they are correctly sorted everywhere.
    settings["model_regions"] = sorted(settings["model_regions"])
    zones = settings["model_regions"]
    logger.info(f"Sorted zones are {', '.join(zones)}")
    zone_num_map = {zone: str(number + 1) for number, zone in enumerate(zones)}

    input_folder = cwd / settings["input_folder"]
    settings["input_folder"] = input_folder

    scenario_definitions = pd.read_csv(input_folder /
                                       settings["scenario_definitions_fn"])

    assert set(scenario_definitions["year"]) == set(
        settings["model_year"]
    ), "The years included the secenario definitions file must match the settings parameter `model_year`"
    assert len(settings["model_year"]) == len(
        settings["model_first_planning_year"]
    ), "The number of years in the settings parameter 'model_year' must be the same as 'model_first_planning_year'"

    # Build a dictionary of settings for every planning year and case_id
    scenario_settings = build_scenario_settings(settings, scenario_definitions)
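    # For reference: build_scenario_settings returns a nested dictionary keyed first
    # by planning year and then by case_id, so scenario_settings[year][case_id] is
    # the per-case settings dict consumed in the loop below.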

    i = 0
    model_regions_gdf = None
    # Initialize so the GenX settings step below works when args.transmission is off
    ces = None
    for year in scenario_settings:
        for case_id, _settings in scenario_settings[year].items():
            case_folder = (out_folder / f"{year}" /
                           f"{case_id}_{year}_{_settings['case_name']}")

            if i == 0:
                if args.gens:
                    gc = GeneratorClusters(
                        pudl_engine=pudl_engine,
                        pudl_out=pudl_out,
                        settings=_settings,
                        current_gens=args.current_gens,
                        sort_gens=args.sort_gens,
                    )
                    gen_clusters = gc.create_all_generators()
                    # Fuels are needed below by calculate_partial_CES_values, so build
                    # the table whenever generators are created; only write the file
                    # when fuel outputs are requested.
                    fuels = fuel_cost_table(
                        fuel_costs=gc.fuel_prices,
                        generators=gc.all_resources,
                        settings=_settings,
                    )
                    fuels["fuel_indices"] = range(1, len(fuels) + 1)
                    if args.fuel:
                        write_results_file(
                            df=remove_fuel_scenario_name(fuels, _settings),
                            folder=case_folder,
                            file_name="Fuels_data.csv",
                        )

                    # gen_clusters = remove_fuel_scenario_name(gen_clusters, _settings)
                    gen_clusters["zone"] = gen_clusters["region"].map(
                        zone_num_map)
                    gen_clusters = add_misc_gen_values(gen_clusters, _settings)
                    # gen_clusters = set_int_cols(gen_clusters)
                    # gen_clusters = gen_clusters.fillna(value=0)

                    # Save existing resources that aren't demand response for use in
                    # other cases
                    existing_gens = gc.existing_resources.copy()
                    # gen_clusters.loc[
                    #     (gen_clusters["Existing_Cap_MW"] >= 0)
                    #     & (gen_clusters["DR"] == 0),
                    #     :,
                    # ]
                    logger.info(
                        f"Finished first round with year {year} scenario {case_id}"
                    )
                    # if settings.get("partial_ces"):
                    gen_variability = make_generator_variability(gen_clusters)
                    gen_variability.columns = (
                        gen_clusters["region"] + "_" +
                        gen_clusters["Resource"] + "_" +
                        gen_clusters["cluster"].astype(str))
                    gens = calculate_partial_CES_values(
                        gen_clusters, fuels,
                        _settings).pipe(fix_min_power_values, gen_variability)
                    cols = [
                        c for c in _settings["generator_columns"] if c in gens
                    ]

                    write_results_file(
                        df=remove_fuel_scenario_name(
                            gens[cols].fillna(0), _settings).pipe(
                                set_int_cols).pipe(round_col_values),
                        folder=case_folder,
                        file_name="Generators_data.csv",
                        include_index=False,
                    )
                    # else:
                    #     write_results_file(
                    #         df=gen_clusters.fillna(0),
                    #         folder=case_folder,
                    #         file_name="Generators_data.csv",
                    #         include_index=False,
                    #     )

                    # write_results_file(
                    #     df=gen_variability,
                    #     folder=case_folder,
                    #     file_name="Generators_variability.csv",
                    #     include_index=True,
                    # )

                    i += 1
                if args.transmission:
                    if args.gens is False:
                        model_regions_gdf = load_ipm_shapefile(_settings)
                    else:
                        model_regions_gdf = gc.model_regions_gdf
                    transmission = agg_transmission_constraints(
                        pudl_engine=pudl_engine, settings=_settings)
                    transmission = (
                        transmission.pipe(
                            transmission_line_distance,
                            ipm_shapefile=model_regions_gdf,
                            settings=_settings,
                            units="mile",
                        )
                        .pipe(network_line_loss, settings=_settings)
                        .pipe(network_max_reinforcement, settings=_settings)
                        .pipe(network_reinforcement_cost, settings=_settings)
                    )

                # genx_settings = make_genx_settings_file(pudl_engine, _settings)
                # write_case_settings_file(
                #     settings=genx_settings,
                #     folder=case_folder,
                #     file_name="GenX_settings.yml",
                # )

            else:
                logger.info(f"\nStarting year {year} scenario {case_id}")
                if args.gens:

                    gc.settings = _settings
                    # gc.current_gens = False

                    # Change the fuel labels in existing generators to reflect the
                    # correct AEO scenario for each fuel and update GenX tags based
                    # on settings.
                    # gc.existing_resources = existing_gens.pipe(
                    #     add_fuel_labels, gc.fuel_prices, _settings
                    # ).pipe(add_genx_model_tags, _settings)

                    gen_clusters = gc.create_all_generators()
                    # if settings.get("partial_ces"):
                    #     fuels = fuel_cost_table(
                    #         fuel_costs=gc.fuel_prices,
                    #         generators=gc.all_resources,
                    #         settings=_settings,
                    #     )
                    #     gen_clusters = calculate_partial_CES_values(
                    #         gen_clusters, fuels, _settings
                    #     )

                    gen_clusters = add_misc_gen_values(gen_clusters, _settings)
                    gen_clusters = set_int_cols(gen_clusters)
                    # gen_clusters = gen_clusters.fillna(value=0)

                    # gen_clusters = remove_fuel_scenario_name(gen_clusters, _settings)
                    gen_clusters["zone"] = gen_clusters["region"].map(
                        zone_num_map)

                    fuels = fuel_cost_table(
                        fuel_costs=gc.fuel_prices,
                        generators=gc.all_resources,
                        settings=_settings,
                    )
                    gen_variability = make_generator_variability(gen_clusters)
                    gen_variability.columns = (
                        gen_clusters["region"] + "_" +
                        gen_clusters["Resource"] + "_" +
                        gen_clusters["cluster"].astype(str) + "_" +
                        gen_clusters["R_ID"].astype(str))
                    gens = calculate_partial_CES_values(
                        gen_clusters, fuels,
                        _settings).pipe(fix_min_power_values, gen_variability)
                    cols = [
                        c for c in _settings["generator_columns"] if c in gens
                    ]
                    write_results_file(
                        df=remove_fuel_scenario_name(
                            gens[cols].fillna(0), _settings).pipe(
                                set_int_cols).pipe(round_col_values),
                        folder=case_folder,
                        file_name="Generators_data.csv",
                        include_index=False,
                    )
                    # write_results_file(
                    #     df=gen_clusters.fillna(0),
                    #     folder=case_folder,
                    #     file_name="Generators_data.csv",
                    # )

                    # write_results_file(
                    #     df=gen_variability,
                    #     folder=case_folder,
                    #     file_name="Generators_variability.csv",
                    #     include_index=True,
                    # )

            if args.load:
                load = make_final_load_curves(pudl_engine=pudl_engine,
                                              settings=_settings)
                load.columns = "Load_MW_z" + load.columns.map(zone_num_map)

                (
                    reduced_resource_profile,
                    reduced_load_profile,
                    time_series_mapping,
                ) = reduce_time_domain(gen_variability, load, _settings)
                write_results_file(
                    df=reduced_load_profile,
                    folder=case_folder,
                    file_name="Load_data.csv",
                    include_index=False,
                )
                write_results_file(
                    df=reduced_resource_profile,
                    folder=case_folder,
                    file_name="Generators_variability.csv",
                    include_index=True,
                )
                if time_series_mapping is not None:
                    write_results_file(
                        df=time_series_mapping,
                        folder=case_folder,
                        file_name="time_series_mapping.csv",
                        include_index=False,
                    )

            if args.transmission:
                # if not model_regions_gdf:
                #     if args.gens is False:
                #         model_regions_gdf = load_ipm_shapefile(_settings)
                #     else:
                #         model_regions_gdf = gc.model_regions_gdf
                # transmission = agg_transmission_constraints(
                #     pudl_engine=pudl_engine, settings=_settings
                # )
                transmission = transmission.pipe(
                    network_max_reinforcement,
                    settings=_settings).pipe(network_reinforcement_cost,
                                             settings=_settings)

                network = add_emission_policies(transmission, _settings)

                # Change the CES limit for cases where it's emissions based
                if "emissions_ces_limit" in _settings:
                    network = calc_emissions_ces_level(network, load,
                                                       _settings)

                # If single-value for CES, use that value for input to GenX
                # settings creation. This way values that are calculated internally
                # get used.
                if network["CES"].std() == 0:
                    ces = network["CES"].mean()
                else:
                    ces = None

                write_results_file(
                    df=network.pipe(set_int_cols).pipe(round_col_values),
                    folder=case_folder,
                    file_name="Network.csv",
                    include_index=False,
                )

            if args.fuel and args.gens:
                fuels = fuel_cost_table(
                    fuel_costs=gc.fuel_prices,
                    generators=gc.all_resources,
                    settings=_settings,
                )
                # fuels = remove_fuel_scenario_name(fuels, _settings)

                # Hack to get around the fact that fuels with different cost names
                # get added and end up as duplicates.
                fuels = fuels.drop_duplicates(subset=["Fuel"], keep="last")
                fuels["fuel_indices"] = range(1, len(fuels) + 1)
                write_results_file(
                    df=remove_fuel_scenario_name(
                        fuels,
                        _settings).pipe(set_int_cols).pipe(round_col_values),
                    folder=case_folder,
                    file_name="Fuels_data.csv",
                )

            if _settings.get("genx_settings_fn"):
                genx_settings = make_genx_settings_file(pudl_engine,
                                                        _settings,
                                                        calculated_ces=ces)
                write_case_settings_file(
                    settings=genx_settings,
                    folder=case_folder,
                    file_name="GenX_settings.yml",
                )
            write_case_settings_file(
                settings=_settings,
                folder=case_folder,
                file_name="powergenome_case_settings.yml",
            )
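
# For each case the loop above writes its outputs into
# out_folder/<year>/<case_id>_<year>_<case_name>/, which can include (depending on
# the CLI flags): Fuels_data.csv, Generators_data.csv, Load_data.csv,
# Generators_variability.csv, time_series_mapping.csv, Network.csv,
# GenX_settings.yml, and powergenome_case_settings.yml.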
Example no. 4
def test_settings():
    settings = load_settings(DATA_PATHS["test_data"] /
                             "pudl_data_extraction.yml")
    return settings
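
# Note (assumption): if this loader is meant to be shared across tests rather than
# collected as a test itself, it would typically be registered as a pytest fixture,
# e.g.:
#
#     import pytest
#
#     @pytest.fixture(scope="module")
#     def test_settings():
#         return load_settings(DATA_PATHS["test_data"] / "pudl_data_extraction.yml")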