Example #1
def test_parallel_agg(resolution=64):
    """Test that parallel aggregation yields the same results as serial
    aggregation."""

    gids = list(range(50, 70))
    summary_serial = SupplyCurveAggregation.summary(EXCL,
                                                    GEN,
                                                    TM_DSET,
                                                    excl_dict=EXCL_DICT,
                                                    res_class_dset=None,
                                                    res_class_bins=None,
                                                    resolution=resolution,
                                                    gids=gids,
                                                    max_workers=1)
    summary_parallel = SupplyCurveAggregation.summary(EXCL,
                                                      GEN,
                                                      TM_DSET,
                                                      excl_dict=EXCL_DICT,
                                                      res_class_dset=None,
                                                      res_class_bins=None,
                                                      resolution=resolution,
                                                      gids=gids,
                                                      max_workers=3)

    assert_frame_equal(summary_serial, summary_parallel)
Example #2
def test_vpd_fractional_excl():
    """Test variable power density with fractional exclusions"""

    gids_subset = list(range(0, 20))
    excl_dict_1 = {'ri_padus': {'exclude_values': [1]}}
    s1 = SupplyCurveAggregation.summary(EXCL, GEN, TM_DSET,
                                        excl_dict=excl_dict_1,
                                        res_class_dset=RES_CLASS_DSET,
                                        res_class_bins=RES_CLASS_BINS,
                                        data_layers=DATA_LAYERS,
                                        power_density=FVPD,
                                        max_workers=1, gids=gids_subset)

    excl_dict_2 = {'ri_padus': {'exclude_values': [1],
                                'weight': 0.5}}
    s2 = SupplyCurveAggregation.summary(EXCL, GEN, TM_DSET,
                                        excl_dict=excl_dict_2,
                                        res_class_dset=RES_CLASS_DSET,
                                        res_class_bins=RES_CLASS_BINS,
                                        data_layers=DATA_LAYERS,
                                        power_density=FVPD,
                                        max_workers=1, gids=gids_subset)

    for i in s1.index:
        cap_full = s1.loc[i, 'capacity']
        cap_half = s2.loc[i, 'capacity']

        msg = ('Variable power density for fractional exclusions failed! '
               'Index {} has cap full {} and cap half {}'
               .format(i, cap_full, cap_half))
        assert (cap_full / cap_half) == 2, msg
Example #3
def test_vpd_incomplete():
    """Test an incomplete VPD input and make sure an exception is raised"""
    try:
        SupplyCurveAggregation.summary(EXCL, GEN, TM_DSET,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       data_layers=DATA_LAYERS,
                                       max_workers=1, power_density=FVPDI)
    except FileInputError as e:
        assert '1314958' in str(e)
    else:
        raise Exception('Test with incomplete VPD input did not throw error!')
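These tests appear to run under pytest, so the same failure-mode check can be written more directly with pytest.raises; a minimal sketch reusing the module-level fixtures from the example above (the test name here is illustrative only):

import pytest


def test_vpd_incomplete_pytest_style():
    """Same check as above, expressed with pytest.raises."""
    with pytest.raises(FileInputError) as excinfo:
        SupplyCurveAggregation.summary(EXCL, GEN, TM_DSET,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       data_layers=DATA_LAYERS,
                                       max_workers=1, power_density=FVPDI)
    # The error message is expected to name the missing resource gid
    assert '1314958' in str(excinfo.value)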
Example #4
def test_vpd():
    """Test variable power density"""

    s = SupplyCurveAggregation.summary(EXCL,
                                       GEN,
                                       TM_DSET,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       data_layers=DATA_LAYERS,
                                       max_workers=1,
                                       power_density=FVPD)

    vpd = pd.read_csv(FVPD, index_col=0)
    for i in s.index:
        capacity = s.loc[i, 'capacity']
        area = s.loc[i, 'area_sq_km']
        res_gids = np.array(s.loc[i, 'res_gids'])
        gid_counts = np.array(s.loc[i, 'gid_counts'])
        vpd_per_gid = vpd.loc[res_gids, 'power_density'].values
        truth = area * (vpd_per_gid * gid_counts).sum() / gid_counts.sum()

        diff = 100 * abs(capacity - truth) / truth

        msg = ('Variable power density failed! Index {} has cap {} and '
               'truth {}'.format(i, capacity, truth))
        assert diff < 1, msg
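The "truth" value above is simply the SC point area multiplied by the gid-count-weighted mean power density of its contributing resource gids. A minimal numeric sketch of that weighted mean, with made-up values (power density assumed to be in MW/km2, the usual reV convention):

import numpy as np

# Hypothetical values for a single supply curve point
area_sq_km = 10.0                     # developable area
power_density = np.array([3.0, 5.0])  # MW/km2 for each contributing resource gid
gid_counts = np.array([30, 10])       # included exclusion pixels per resource gid

weighted_pd = (power_density * gid_counts).sum() / gid_counts.sum()  # 3.5 MW/km2
capacity_mw = area_sq_km * weighted_pd                               # 35 MW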
Example #5
def test_sc_agg_econ_scale():
    """Test supply curve aggregation with LCOE scaling based on plant capacity.
    """
    data = {
        'capital_cost': 53455000,
        'fixed_operating_cost': 360000,
        'fixed_charge_rate': 0.096,
        'variable_operating_cost': 0
    }

    with tempfile.TemporaryDirectory() as td:
        gen_temp = os.path.join(td, 'ri_my_pv_gen.h5')
        shutil.copy(GEN, gen_temp)

        with h5py.File(gen_temp, 'a') as res:
            for k, v in data.items():
                arr = np.full(res['meta'].shape, v)
                res.create_dataset(k, res['meta'].shape, data=arr)
                res[k].attrs['scale_factor'] = 1.0

        eqn = '2 * capacity ** -0.3'
        s = SupplyCurveAggregation.summary(EXCL,
                                           gen_temp,
                                           TM_DSET,
                                           excl_dict=EXCL_DICT,
                                           res_class_dset=RES_CLASS_DSET,
                                           res_class_bins=RES_CLASS_BINS,
                                           data_layers=DATA_LAYERS,
                                           gids=list(np.arange(10)),
                                           max_workers=1,
                                           cap_cost_scale=eqn)

        aep = s['capacity'] * s['mean_cf'] * 8760 * 1000

        true_raw_lcoe = ((data['fixed_charge_rate'] * data['capital_cost'] +
                          data['fixed_operating_cost']) / aep +
                         data['variable_operating_cost'])
        true_raw_lcoe *= 1000  # convert $/kWh -> $/MWh

        # Back out the fcr * capital_cost term ($)
        x = ((s['raw_lcoe'] / 1000 - data['variable_operating_cost']) * aep -
             data['fixed_operating_cost'])
        eval_inputs = {k: s[k].values.flatten() for k in s.columns}
        # pylint: disable=eval-used
        scalars = eval(str(eqn), globals(), eval_inputs)
        s['scalars'] = scalars
        x *= scalars
        true_scaled_lcoe = ((x + data['fixed_operating_cost']) / aep +
                            data['variable_operating_cost'])
        true_scaled_lcoe *= 1000  # convert $/kWh -> $/MWh

        assert np.allclose(true_scaled_lcoe, s['mean_lcoe'])
        assert np.allclose(true_raw_lcoe, s['raw_lcoe'])
        s = s.sort_values('capacity')
        assert all(s['mean_lcoe'].diff()[1:] < 0)
        for i in s.index.values:
            if s.loc[i, 'scalars'] < 1:
                assert s.loc[i, 'mean_lcoe'] < s.loc[i, 'raw_lcoe']
            else:
                assert s.loc[i, 'mean_lcoe'] >= s.loc[i, 'raw_lcoe']
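The hand calculation above follows the standard fixed-charge-rate LCOE form, with only the capital cost term multiplied by the capacity-dependent scalar from cap_cost_scale. A minimal sketch of that relationship for one supply curve point, using the constants from the data dict above (the helper name is illustrative, not part of reV):

def scaled_lcoe(capacity_mw, mean_cf, fcr=0.096, capital_cost=53455000,
                foc=360000, voc=0.0):
    """Illustrative scaled LCOE in $/MWh for a single SC point."""
    aep_kwh = capacity_mw * mean_cf * 8760 * 1000
    scalar = 2 * capacity_mw ** -0.3          # same form as the test's eqn
    lcoe_kwh = (fcr * capital_cost * scalar + foc) / aep_kwh + voc
    return lcoe_kwh * 1000                    # $/kWh -> $/MWh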
Example #6
def test_aggregation_scalar_excl():
    """Test the aggregation summary with exclusions of 0.5"""

    gids_subset = list(range(0, 20))
    excl_dict_1 = {'ri_padus': {'exclude_values': [1]}}
    s1 = SupplyCurveAggregation.summary(EXCL,
                                        GEN,
                                        TM_DSET,
                                        excl_dict=excl_dict_1,
                                        res_class_dset=RES_CLASS_DSET,
                                        res_class_bins=RES_CLASS_BINS,
                                        data_layers=DATA_LAYERS,
                                        max_workers=1,
                                        gids=gids_subset)
    excl_dict_2 = {'ri_padus': {'exclude_values': [1], 'weight': 0.5}}
    s2 = SupplyCurveAggregation.summary(EXCL,
                                        GEN,
                                        TM_DSET,
                                        excl_dict=excl_dict_2,
                                        res_class_dset=RES_CLASS_DSET,
                                        res_class_bins=RES_CLASS_BINS,
                                        data_layers=DATA_LAYERS,
                                        max_workers=1,
                                        gids=gids_subset)

    dsets = ['area_sq_km', 'capacity']
    for dset in dsets:
        diff = (s1[dset].values / s2[dset].values)
        msg = ('Fractional exclusions failed for {} which has values {} and {}'
               .format(dset, s1[dset].values, s2[dset].values))
        assert all(diff == 2), msg

    for i in s1.index:
        counts_full = s1.loc[i, 'gid_counts']
        counts_half = s2.loc[i, 'gid_counts']

        for j, counts in enumerate(counts_full):
            msg = ('GID counts for fractional exclusions failed for index {}!'.
                   format(i))
            assert counts == 2 * counts_half[j], msg
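The exact factor-of-two results above follow from how exclusion weights scale included area: halving every pixel weight halves the included area, and at a fixed power density capacity scales with area. A standalone numeric sketch, assuming 90 m exclusion pixels as referenced elsewhere in these tests:

import numpy as np

full_weights = np.array([1.0, 1.0, 1.0, 1.0])   # hypothetical inclusion weights
half_weights = 0.5 * full_weights

pixel_area_sq_km = 0.09 * 0.09                   # 90 m x 90 m pixel
area_full = full_weights.sum() * pixel_area_sq_km
area_half = half_weights.sum() * pixel_area_sq_km

assert area_full / area_half == 2                # area (and capacity) halves exactly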
Example #7
def test_aggregation_gen_econ():
    """Test the aggregation summary method with separate gen and econ
    input files."""

    s1 = SupplyCurveAggregation.summary(EXCL,
                                        GEN,
                                        TM_DSET,
                                        excl_dict=EXCL_DICT,
                                        res_class_dset=RES_CLASS_DSET,
                                        res_class_bins=RES_CLASS_BINS,
                                        data_layers=DATA_LAYERS,
                                        max_workers=1)
    s2 = SupplyCurveAggregation.summary(EXCL,
                                        ONLY_GEN,
                                        TM_DSET,
                                        econ_fpath=ONLY_ECON,
                                        excl_dict=EXCL_DICT,
                                        res_class_dset=RES_CLASS_DSET,
                                        res_class_bins=RES_CLASS_BINS,
                                        data_layers=DATA_LAYERS,
                                        max_workers=1)
    assert_frame_equal(s1, s2)
Example #8
def test_aggregation_category_layer():
    """Test aggregation of data layers with category method"""
    data_layers = {
        'pct_slope': {
            'dset': 'ri_srtm_slope',
            'method': 'mean'
        },
        'reeds_region': {
            'dset': 'ri_reeds_regions',
            'method': 'category'
        },
        'padus': {
            'dset': 'ri_padus',
            'method': 'category'
        }
    }

    s = SupplyCurveAggregation.summary(EXCL,
                                       GEN,
                                       TM_DSET,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       data_layers=data_layers,
                                       max_workers=1)

    for i in s.index.values:
        counts = s.loc[i, 'gid_counts']
        rr = s.loc[i, 'reeds_region']
        assert isinstance(rr, str)
        rr = json.loads(rr)
        assert isinstance(rr, dict)
        rr_sum = sum(list(rr.values()))
        padus = s.loc[i, 'padus']
        assert isinstance(padus, str)
        padus = json.loads(padus)
        assert isinstance(padus, dict)
        padus_sum = sum(list(padus.values()))
        try:
            assert padus_sum == sum(counts)
            assert padus_sum >= rr_sum
        except AssertionError:
            e = ('Categorical data layer aggregation failed:\n{}'.format(
                s.loc[i]))
            raise RuntimeError(e)
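For context, a data layer aggregated with the 'category' method lands in the summary table as a JSON string mapping each category value to its included pixel count, which is why the test round-trips it through json.loads before summing. A minimal sketch with made-up values:

import json

rr_raw = '{"1": 120.0, "3": 45.5}'   # hypothetical 'reeds_region' cell
rr = json.loads(rr_raw)
rr_sum = sum(rr.values())            # 165.5, comparable against sum(gid_counts)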
Example #9
def plot_sc_offshore(plot_var='mean_lcoe'):
    """Plot the supply curve map colored by plot_var."""
    import matplotlib.pyplot as plt

    s = SupplyCurveAggregation.summary(EXCL_FPATH,
                                       OFFSHORE_BASELINE,
                                       TM_DSET,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       cf_dset=CF_DSET,
                                       lcoe_dset=LCOE_DSET,
                                       data_layers=DATA_LAYERS,
                                       max_workers=1)

    plt.scatter(s['longitude'], s['latitude'], c=s[plot_var], marker='s')
    plt.axis('equal')
    plt.colorbar(label=plot_var)
    plt.show()
Example #10
def test_aggregation_extent(resolution=64):
    """Get the SC points aggregation summary and test that there are expected
    columns and that all resource gids were found"""

    summary = SupplyCurveAggregation.summary(EXCL,
                                             GEN,
                                             TM_DSET,
                                             excl_dict=EXCL_DICT,
                                             res_class_dset=None,
                                             res_class_bins=None,
                                             data_layers=DATA_LAYERS,
                                             resolution=resolution)

    all_res_gids = []
    for gids in summary['res_gids']:
        all_res_gids += gids

    assert 'sc_col_ind' in summary
    assert 'sc_row_ind' in summary
    assert 'gen_gids' in summary
    assert len(set(all_res_gids)) == 177
Example #11
def test_sc_agg_offshore():
    """Test the SC offshore aggregation and check offshore SC points against
    known offshore gen points."""

    s = SupplyCurveAggregation.summary(EXCL_FPATH,
                                       OFFSHORE_BASELINE,
                                       TM_DSET,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       cf_dset=CF_DSET,
                                       lcoe_dset=LCOE_DSET,
                                       data_layers=DATA_LAYERS,
                                       max_workers=1)

    for col in Offshore.DEFAULT_META_COLS:
        msg = ('Offshore data column "{}" was not passed through to agg table'.
               format(col))
        assert col in s, msg

    with Outputs(OFFSHORE_BASELINE, mode='r') as out:
        meta = out.meta

    offshore_mask = (meta.offshore == 1)
    offshore_gids = meta.loc[offshore_mask, 'gid'].values.tolist()

    for sc_gid in s.index:
        if s.at[sc_gid, 'offshore']:
            assert int(s.at[sc_gid, 'farm_gid']) in offshore_gids
            assert all(np.array(json.loads(s.at[sc_gid, 'res_gids'])) < 3e6)
            assert s.at[sc_gid, 'elevation'] == 0.0
            assert s.at[sc_gid, 'capacity'] == 600
            assert np.isnan(s.at[sc_gid, 'pct_slope'])
        else:
            for res_gid in s.at[sc_gid, 'res_gids']:
                assert res_gid not in offshore_gids

    for gid in offshore_gids:
        assert gid in s['farm_gid'].values
Example #12
def test_aggregation_summary():
    """Test the aggregation summary method against a baseline file."""

    s = SupplyCurveAggregation.summary(EXCL,
                                       GEN,
                                       TM_DSET,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       data_layers=DATA_LAYERS,
                                       max_workers=1)

    if not os.path.exists(AGG_BASELINE):
        s.to_csv(AGG_BASELINE)
        raise Exception('Aggregation summary baseline file did not exist. '
                        'Created: {}'.format(AGG_BASELINE))

    else:
        for c in ['res_gids', 'gen_gids', 'gid_counts']:
            s[c] = s[c].astype(str)

        s_baseline = pd.read_csv(AGG_BASELINE, index_col=0)

        assert_frame_equal(s, s_baseline, check_dtype=False)
Example #13
def test_agg_friction():
    """Test SC Aggregation with friction by checking friction factors and LCOE
    against a hand calc."""

    warnings.filterwarnings('ignore')

    for gid in [100, 114, 130, 181]:
        s = SupplyCurveAggregation.summary(EXCL_FPATH,
                                           GEN,
                                           TM_DSET,
                                           excl_dict=EXCL_DICT,
                                           res_class_dset=RES_CLASS_DSET,
                                           res_class_bins=RES_CLASS_BINS,
                                           data_layers=DATA_LAYERS,
                                           resolution=RESOLUTION,
                                           gids=[gid],
                                           max_workers=1,
                                           friction_fpath=FRICTION_FPATH,
                                           friction_dset=FRICTION_DSET)

        row_slice, col_slice = EXTENT.get_excl_slices(gid)

        test_e = EXCL[row_slice, col_slice]
        test_f = FRICTION[row_slice, col_slice]
        x = test_e * test_f
        x = x.flatten()
        x = x[(x != 0)]
        mean_friction = x.mean()

        m = ('SC point gid {} does not match mean friction hand calc'.format(
            gid))
        assert s['mean_friction'].values[0] == mean_friction, m
        m = ('SC point gid {} does not match mean LCOE with friction hand calc'
             .format(gid))
        assert np.allclose(s['mean_lcoe_friction'],
                           s['mean_lcoe'] * mean_friction), m
Example #14
def test_aggregation_extra_dsets():
    """Test aggregation with extra datasets to aggregate."""
    h5_dsets = ['lcoe_fcr-2012', 'lcoe_fcr-2013', 'lcoe_fcr-stdev']
    s = SupplyCurveAggregation.summary(EXCL,
                                       ONLY_GEN,
                                       TM_DSET,
                                       h5_dsets=h5_dsets,
                                       econ_fpath=ONLY_ECON,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       data_layers=DATA_LAYERS,
                                       max_workers=1)

    for dset in h5_dsets:
        assert 'mean_{}'.format(dset) in s.columns

    check = s['mean_lcoe_fcr-2012'] == s['mean_lcoe']
    assert not any(check)
    check = s['mean_lcoe_fcr-2013'] == s['mean_lcoe']
    assert not any(check)

    avg = (s['mean_lcoe_fcr-2012'] + s['mean_lcoe_fcr-2013']) / 2
    assert np.allclose(avg.values, s['mean_lcoe'].values)
Example #15
def direct(ctx, excl_fpath, gen_fpath, econ_fpath, res_fpath, tm_dset,
           excl_dict, check_excl_layers, res_class_dset, res_class_bins,
           cf_dset, lcoe_dset, h5_dsets, data_layers, resolution, excl_area,
           power_density, area_filter_kernel, min_area, friction_fpath,
           friction_dset, out_dir, log_dir, verbose):
    """reV Supply Curve Aggregation Summary CLI."""
    name = ctx.obj['NAME']
    ctx.obj['EXCL_FPATH'] = excl_fpath
    ctx.obj['GEN_FPATH'] = gen_fpath
    ctx.obj['ECON_FPATH'] = econ_fpath
    ctx.obj['RES_FPATH'] = res_fpath
    ctx.obj['TM_DSET'] = tm_dset
    ctx.obj['EXCL_DICT'] = excl_dict
    ctx.obj['CHECK_LAYERS'] = check_excl_layers
    ctx.obj['RES_CLASS_DSET'] = res_class_dset
    ctx.obj['RES_CLASS_BINS'] = res_class_bins
    ctx.obj['CF_DSET'] = cf_dset
    ctx.obj['LCOE_DSET'] = lcoe_dset
    ctx.obj['H5_DSETS'] = h5_dsets
    ctx.obj['DATA_LAYERS'] = data_layers
    ctx.obj['RESOLUTION'] = resolution
    ctx.obj['EXCL_AREA'] = excl_area
    ctx.obj['POWER_DENSITY'] = power_density
    ctx.obj['AREA_FILTER_KERNEL'] = area_filter_kernel
    ctx.obj['MIN_AREA'] = min_area
    ctx.obj['FRICTION_FPATH'] = friction_fpath
    ctx.obj['FRICTION_DSET'] = friction_dset
    ctx.obj['OUT_DIR'] = out_dir
    ctx.obj['LOG_DIR'] = log_dir
    ctx.obj['VERBOSE'] = verbose

    if ctx.invoked_subcommand is None:
        t0 = time.time()
        init_mult(name,
                  log_dir,
                  modules=[__name__, 'reV.supply_curve'],
                  verbose=verbose)

        with h5py.File(excl_fpath, mode='r') as f:
            dsets = list(f)
        if tm_dset not in dsets:
            try:
                TechMapping.run(excl_fpath, res_fpath, tm_dset)
            except Exception as e:
                logger.exception('TechMapping process failed. Received the '
                                 'following error:\n{}'.format(e))
                raise e

        if isinstance(excl_dict, str):
            excl_dict = dict_str_load(excl_dict)

        if isinstance(data_layers, str):
            data_layers = dict_str_load(data_layers)

        try:
            summary = SupplyCurveAggregation.summary(
                excl_fpath,
                gen_fpath,
                tm_dset,
                econ_fpath=econ_fpath,
                excl_dict=excl_dict,
                res_class_dset=res_class_dset,
                res_class_bins=res_class_bins,
                cf_dset=cf_dset,
                lcoe_dset=lcoe_dset,
                h5_dsets=h5_dsets,
                data_layers=data_layers,
                resolution=resolution,
                excl_area=excl_area,
                power_density=power_density,
                area_filter_kernel=area_filter_kernel,
                min_area=min_area,
                friction_fpath=friction_fpath,
                friction_dset=friction_dset,
                check_excl_layers=check_excl_layers)

        except Exception as e:
            logger.exception('Supply curve Aggregation failed. Received the '
                             'following error:\n{}'.format(e))
            raise e

        fn_out = '{}.csv'.format(name)
        fpath_out = os.path.join(out_dir, fn_out)
        summary.to_csv(fpath_out)

        runtime = (time.time() - t0) / 60
        logger.info('Supply curve aggregation complete. '
                    'Time elapsed: {:.2f} min. Target output dir: {}'.format(
                        runtime, out_dir))

        finput = [excl_fpath, gen_fpath]
        if res_fpath is not None:
            finput.append(res_fpath)

        # add job to reV status file.
        status = {
            'dirout': out_dir,
            'fout': fn_out,
            'job_status': 'successful',
            'runtime': runtime,
            'finput': finput,
            'excl_fpath': excl_fpath,
            'excl_dict': excl_dict,
            'area_filter_kernel': area_filter_kernel,
            'min_area': min_area
        }
        Status.make_job_file(out_dir, 'supply-curve-aggregation', name, status)
Example #16
def test_data_layer_methods():
    """Test aggregation of data layers with different methods"""
    data_layers = {
        'pct_slope_mean': {
            'dset': 'ri_srtm_slope',
            'method': 'mean'
        },
        'pct_slope_max': {
            'dset': 'ri_srtm_slope',
            'method': 'max'
        },
        'pct_slope_min': {
            'dset': 'ri_srtm_slope',
            'method': 'min'
        },
        'reeds_region': {
            'dset': 'ri_reeds_regions',
            'method': 'category'
        },
        'padus': {
            'dset': 'ri_padus',
            'method': 'category'
        }
    }

    s = SupplyCurveAggregation.summary(EXCL,
                                       GEN,
                                       TM_DSET,
                                       excl_dict=EXCL_DICT,
                                       res_class_dset=RES_CLASS_DSET,
                                       res_class_bins=RES_CLASS_BINS,
                                       data_layers=data_layers,
                                       max_workers=1)

    for i in s.index.values:

        # Check categorical data layers
        counts = s.loc[i, 'gid_counts']
        rr = s.loc[i, 'reeds_region']
        assert isinstance(rr, str)
        rr = json.loads(rr)
        assert isinstance(rr, dict)
        rr_sum = sum(list(rr.values()))
        padus = s.loc[i, 'padus']
        assert isinstance(padus, str)
        padus = json.loads(padus)
        assert isinstance(padus, dict)
        padus_sum = sum(list(padus.values()))
        try:
            assert padus_sum == sum(counts)
            assert padus_sum >= rr_sum
        except AssertionError:
            e = ('Categorical data layer aggregation failed:\n{}'.format(
                s.loc[i]))
            raise RuntimeError(e)

        # Check min/mean/max of the same data layer
        n = s.loc[i, 'n_gids']
        slope_mean = s.loc[i, 'pct_slope_mean']
        slope_max = s.loc[i, 'pct_slope_max']
        slope_min = s.loc[i, 'pct_slope_min']
        if n > 3:  # sc points with <= 3 90m pixels can have min == mean == max
            assert slope_min < slope_mean < slope_max
        else:
            assert slope_min <= slope_mean <= slope_max
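As a conceptual illustration of what the different data-layer methods reduce to, here is a standalone numpy sketch of min/mean/max versus category aggregation over the included pixels of one SC point (this mirrors the behavior the tests check, not the library's internal implementation):

import numpy as np
from collections import Counter

slope = np.array([2.0, 4.0, 6.0, 8.0])        # hypothetical ri_srtm_slope pixels
regions = np.array([1, 1, 3, 3])              # hypothetical ri_reeds_regions pixels
included = np.array([True, True, True, False])

slope_incl = slope[included]
stats = {'min': slope_incl.min(),             # 2.0
         'mean': slope_incl.mean(),           # 4.0
         'max': slope_incl.max()}             # 6.0

category_counts = dict(Counter(regions[included].tolist()))   # {1: 2, 3: 1}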