Example #1
def decompose_plant_data_frame_into_areas_and_resources(
        df, areas, resources, grid):
    """Take a plant-column data frame and decompose it into plant-column data frames
    for each area/resource combination.

    :param pandas.DataFrame df: data frame, columns are plant id in grid.
    :param dict areas: areas to use for decomposition. Keys are area types
        ('*loadzone*', '*state*' or '*interconnect*'), values are
        str/list/tuple/set of areas.
    :param str/list/tuple/set resources: resource(s) to use for decomposition.
    :param powersimdata.input.grid.Grid grid: Grid instance.
    :return: (*dict*) -- keys are areas, values are dictionaries whose keys are
        resources and values are plant-column data frames (datetime index) containing
        only plants of the matching type located in the area.
    """
    df_areas_resources = {}
    resources = _check_resources_are_in_grid_and_format(resources, grid)

    for a, df_a in decompose_plant_data_frame_into_areas(df, areas,
                                                         grid).items():
        df_areas_resources[a] = decompose_plant_data_frame_into_resources(
            df_a, resources, grid)

    return df_areas_resources
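
The nested return structure is easier to picture with a toy example. The sketch below uses plain pandas only, with a made-up plant table standing in for grid.plant (ids, column names and zone names are hypothetical, not the powersimdata schema), to reproduce the {area: {resource: plant-column frame}} shape the function returns.

import pandas as pd

# Made-up plant table: index is plant id, columns give resource type and area.
plant = pd.DataFrame(
    {"type": ["solar", "wind", "solar"], "zone": ["Far West", "Far West", "Coast"]},
    index=[101, 102, 103],
)
# Plant-column data frame: rows are timestamps, columns are plant ids.
pg = pd.DataFrame(
    [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
    index=pd.date_range("2016-01-01", periods=2, freq="h"),
    columns=[101, 102, 103],
)

# Same {area: {resource: frame}} nesting as the function above.
decomposed = {
    zone: {r: pg[p.index.tolist()] for r, p in plants.groupby("type")}
    for zone, plants in plant.groupby("zone")
}
print(decomposed["Far West"]["solar"])  # single column: plant 101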
Example #2
def calculate_NLDC(scenario, resources, hours=100):
    """Calculate the capacity value of a class of resources by comparing the
    mean of the top N hours of absolute demand to the mean of the top N hours of
    net demand. NLDC = 'Net Load Duration Curve'.

    :param powersimdata.scenario.scenario.Scenario scenario: analyzed scenario.
    :param str/list/tuple/set resources: one or more resources to analyze.
    :param int hours: number of hours to analyze.
    :return: (*float*) -- difference between peak demand and peak net demand.
    """
    _check_scenario_is_in_analyze_state(scenario)
    grid = scenario.state.get_grid()
    resources = _check_resources_are_in_grid_and_format(resources, grid)
    _check_number_hours_to_analyze(scenario, hours)

    # Then calculate capacity value
    total_demand = scenario.state.get_demand().sum(axis=1)
    prev_peak = total_demand.sort_values(ascending=False).head(hours).mean()
    plant_groupby = grid.plant.groupby("type")
    plant_indices = [
        i for r in resources for i in plant_groupby.get_group(r).index
    ]
    resource_generation = scenario.state.get_pg()[plant_indices].sum(axis=1)
    net_demand = total_demand - resource_generation
    net_peak = net_demand.sort_values(ascending=False).head(hours).mean()
    return prev_peak - net_peak
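
The heart of the computation is the comparison of two top-N means: peak demand versus peak net demand. A minimal, self-contained sketch with synthetic series (random data standing in for the Scenario's demand and generation profiles) shows the arithmetic:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
index = pd.date_range("2016-01-01", periods=8760, freq="h")
demand = pd.Series(40_000 + 5_000 * rng.random(8760), index=index)
solar_pg = pd.Series(3_000 * rng.random(8760), index=index)  # synthetic solar output

hours = 100
peak = demand.sort_values(ascending=False).head(hours).mean()
net_peak = (demand - solar_pg).sort_values(ascending=False).head(hours).mean()
capacity_value = peak - net_peak  # MW by which the resource lowers the top-100-hour mean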
Example #3
def get_plant_id_for_resources(resources, grid):
    """Get plant id for plants fueled by resource(s).

    :param str/list/tuple/set resources: name of resource(s).
    :param powersimdata.input.grid.Grid grid: Grid instance.
    :return: (*set*) -- set of plant ids.
    """
    resources = _check_resources_are_in_grid_and_format(resources, grid)
    plant = grid.plant
    plant_id = plant[plant.type.isin(resources)].index
    return set(plant_id)
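
The selection itself is a single boolean mask on the type column of the plant table; a toy stand-in for grid.plant (made-up ids and types) shows the pattern, with the result returned as a set so that intersections with data-frame columns, as in the decomposition functions above, stay cheap:

import pandas as pd

# Toy stand-in for grid.plant: index is plant id, "type" is the resource.
plant = pd.DataFrame({"type": ["solar", "wind", "solar", "ng"]}, index=[7, 8, 9, 10])

plant_id = set(plant[plant.type.isin({"solar", "wind"})].index)
print(plant_id)  # {7, 8, 9} (set order is arbitrary)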
Example #4
def decompose_plant_data_frame_into_resources(df, resources, grid):
    """Take a plant-column data frame and decompose it into plant-column data frames
    for each resource.

    :param pandas.DataFrame df: data frame, columns are plant id in grid.
    :param str/list/tuple/set resources: resource(s) to use for decomposition.
    :param powersimdata.input.grid.Grid grid: Grid instance.
    :return: (*dict*) -- keys are resources, values are plant-column data frames.
    """
    _check_data_frame(df, "PG")
    plant_id = set(df.columns)
    _check_plants_are_in_grid(plant_id, grid)
    resources = _check_resources_are_in_grid_and_format(resources, grid)

    # Keep only plants of resource r that appear in df; a list is used because
    # pandas does not accept a set as a column indexer.
    df_resources = {
        r: df[list(get_plant_id_for_resources(r, grid) & plant_id)].sort_index(axis=1)
        for r in resources
    }
    return df_resources
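
The column selection is an intersection between the plants of the requested resource and the plants actually present in df, so the function also works on frames that cover only part of the grid. A minimal sketch of that step with made-up plant ids:

import pandas as pd

pg = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], columns=[101, 103])  # partial plant-column frame
solar_ids = {101, 102}           # plants of type "solar" in a (toy) grid
present = set(pg.columns)        # plants covered by the frame

solar_pg = pg[sorted(solar_ids & present)]  # only plant 101 survives the intersection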
Example #5
def calculate_net_load_peak(scenario, resources, hours=100):
    """Calculate the capacity value of a class of resources by averaging the
    power generated in the top N hours of net load peak.

    :param powersimdata.scenario.scenario.Scenario scenario: analyzed scenario.
    :param str/list/tuple/set resources: one or more resources to analyze.
    :param int hours: number of hours to analyze.
    :return: (*float*) -- resource capacity during hours of peak net demand.
    """
    _check_scenario_is_in_analyze_state(scenario)
    grid = scenario.state.get_grid()
    resources = _check_resources_are_in_grid_and_format(resources, grid)
    _check_number_hours_to_analyze(scenario, hours)

    # Then calculate capacity value
    total_demand = scenario.state.get_demand().sum(axis=1)
    plant_groupby = grid.plant.groupby("type")
    plant_indices = [
        i for r in resources for i in plant_groupby.get_group(r).index
    ]
    resource_generation = scenario.state.get_pg()[plant_indices].sum(axis=1)
    net_demand = total_demand - resource_generation
    top_hours = net_demand.sort_values(ascending=False).head(hours).index
    return resource_generation[top_hours].mean()
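
Compared with calculate_NLDC, only the final step differs: the resource's own output is averaged over the top-N net-load hours rather than taking a difference of two top-N means. Reusing the same kind of synthetic series as above:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
index = pd.date_range("2016-01-01", periods=8760, freq="h")
demand = pd.Series(40_000 + 5_000 * rng.random(8760), index=index)
wind_pg = pd.Series(2_000 * rng.random(8760), index=index)  # synthetic wind output

hours = 100
top_hours = (demand - wind_pg).sort_values(ascending=False).head(hours).index
capacity_value = wind_pg[top_hours].mean()  # mean output during peak net-load hours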
Example #6
def test_check_resources_are_in_grid_and_format():
    # "grid" is a Grid instance defined at module level in the test module.
    _check_resources_are_in_grid_and_format({"solar", "coal", "hydro"}, grid)
    _check_resources_are_in_grid_and_format(["solar", "ng", "hydro", "nuclear"], grid)
Example #7
import pytest


def test_check_resources_are_in_grid_and_format_argument_value():
    # Each tuple pairs an invalid resource set with the module-level grid.
    args = (({"solar", "dfo"}, grid), ({"uranium"}, grid))
    for resources, g in args:
        with pytest.raises(ValueError):
            _check_resources_are_in_grid_and_format(resources, g)
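
An equivalent, arguably more idiomatic form uses pytest.mark.parametrize so each invalid input reports as its own test case. This is a sketch under the same assumptions as above (module-level grid, pytest import), with a hypothetical test name:

@pytest.mark.parametrize("resources", [{"solar", "dfo"}, {"uranium"}])
def test_check_resources_raises_for_invalid_input(resources):
    with pytest.raises(ValueError):
        _check_resources_are_in_grid_and_format(resources, grid)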