def test_bifacial():
    """Test pvwattsv7 with bifacial panel with albedo.

    Runs the same single site with a monofacial and a bifacial SAM config
    and checks that the bifacial capacity factor is strictly higher, plus
    spot-checks both cf_mean values against known values.
    """
    year = 2012
    rev2_points = slice(0, 1)
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_{}.h5'.format(year)
    sam_files = TESTDATADIR + '/SAM/i_pvwattsv7.json'

    # run reV 2.0 generation with the monofacial config
    # (two unused `pp = ProjectPoints(...)` locals were removed)
    gen = Gen.reV_run(tech='pvwattsv7', points=rev2_points,
                      sam_files=sam_files, res_file=res_file,
                      max_workers=1, sites_per_worker=1, fout=None)

    sam_files = TESTDATADIR + '/SAM/i_pvwattsv7_bifacial.json'

    # run reV 2.0 generation with the bifacial config
    gen_bi = Gen.reV_run(tech='pvwattsv7', points=rev2_points,
                         sam_files=sam_files, res_file=res_file,
                         max_workers=1, sites_per_worker=1, fout=None)

    # bifacial output must exceed monofacial for every site in the slice
    assert all(gen_bi.out['cf_mean'] > gen.out['cf_mean'])
    assert np.isclose(gen.out['cf_mean'][0], 0.151, atol=0.005)
    assert np.isclose(gen_bi.out['cf_mean'][0], 0.162, atol=0.005)
def test_pvwatts_v5_v7():
    """Test reV pvwatts generation for v5 vs. v7"""
    year = 2012
    rev2_points = slice(0, 3)
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_{}.h5'.format(year)
    sam_files = TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json'

    # run reV 2.0 generation with pvwattsv7
    # (two unused `pp = ProjectPoints(...)` locals were removed)
    gen7 = Gen.reV_run(tech='pvwattsv7', points=rev2_points,
                       sam_files=sam_files, res_file=res_file,
                       max_workers=1, sites_per_worker=1, fout=None)

    # run reV 2.0 generation with pvwattsv5
    gen5 = Gen.reV_run(tech='pvwattsv5', points=rev2_points,
                       sam_files=sam_files, res_file=res_file,
                       max_workers=1, sites_per_worker=1, fout=None)

    # NOTE(review): atol=3 is far larger than any possible cf_mean
    # fraction difference, so this tolerance is effectively a no-op
    # unless cf_mean is in percent here — confirm intended units.
    msg = 'PVwatts v5 and v7 did not match within test tolerance'
    assert np.allclose(gen7.out['cf_mean'], gen5.out['cf_mean'],
                       atol=3), msg
def _parse_points(ctx):
    """
    Parse project points from CLI inputs

    Parameters
    ----------
    ctx : dict
        CLI context object

    Returns
    -------
    points : slice | list | string | ProjectPoints
        ProjectPoints or points to initialize ProjectPoints
    """
    tech = ctx.obj['TECH']
    points = ctx.obj['POINTS']
    sam_files = ctx.obj['SAM_FILES']
    res_file = ctx.obj['RES_FILE']
    curtailment = ctx.obj['CURTAILMENT']
    lat_lon_fpath = ctx.obj.get('LAT_LON_FPATH', None)
    lat_lon_coords = ctx.obj.get('LAT_LON_COORDS', None)
    regions = ctx.obj.get('REGIONS', None)
    region = ctx.obj.get('REGION', None)
    region_col = ctx.obj.get('REGION_COL', 'state')

    # count how many of the mutually-exclusive point specs were supplied;
    # exactly one is required to build a unique set of project points
    n_specs = 0
    if points is not None:
        n_specs += 1

    if lat_lon_fpath is not None or lat_lon_coords:
        lat_lons = _parse_lat_lons(lat_lon_fpath, lat_lon_coords)
        points = ProjectPoints.lat_lon_coords(lat_lons, res_file,
                                              sam_files, tech=tech,
                                              curtailment=curtailment)
        n_specs += 1

    if region is not None or regions is not None:
        regions = _parse_regions(regions, region, region_col)
        points = ProjectPoints.regions(regions, res_file, sam_files,
                                       tech=tech, curtailment=curtailment)
        n_specs += 1

    if n_specs == 0:
        msg = ("reV Gen requires one of 'points', 'lat-lon-fpath', "
               "'lat-lon-coords', 'regions', or 'region' and region-col' "
               "must be supplied to determine points to compute generation "
               "for!")
    elif n_specs > 1:
        msg = ("reV Gen can only produce a unique set of Project Points for "
               "a single input value for ONE of 'points', 'lat-lon-fpath', "
               "'lat-lon-coords', 'regions', or 'region' and region-col'")
    else:
        msg = None

    if msg is not None:
        logger.error(msg)
        raise ProjectPointsValueError(msg)

    return points
def test_pv_gen_csv2(f_rev1_out='project_outputs.h5',
                     rev2_points=TESTDATADIR + '/project_points/ri.csv',
                     res_file=TESTDATADIR + '/nsrdb/ri_100_nsrdb_2012.h5'):
    """Test project points csv input with list-based sam files."""
    rev1_outs = os.path.join(TESTDATADIR, 'ri_pv', 'scalar_outputs',
                             f_rev1_out)
    sam_files = [TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json',
                 TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json']
    sam_files = {'sam_param_{}'.format(i): k
                 for i, k in enumerate(sam_files)}

    pp = ProjectPoints(rev2_points, sam_files, 'pvwattsv5')
    gen = Gen.reV_run(tech='pvwattsv5', points=rev2_points,
                      sam_files=sam_files, res_file=res_file, fout=None)
    gen_outs = list(gen.out['cf_mean'])

    # initialize the rev1 output hander
    with pv_results(rev1_outs) as pv:
        # get reV 1.0 results
        cf_mean_list = pv.get_cf_mean(pp.sites, '2012')

    # benchmark the results; plain truthiness assert replaces the
    # non-idiomatic `assert result is True` identity comparison
    assert np.allclose(gen_outs, cf_mean_list, rtol=RTOL, atol=ATOL)
def test_pv_gen_slice(f_rev1_out, rev2_points, year, max_workers):
    """Test reV 2.0 generation for PV and benchmark against reV 1.0
    results."""
    # get full file paths.
    rev1_outs = os.path.join(TESTDATADIR, 'ri_pv', 'scalar_outputs',
                             f_rev1_out)
    sam_files = TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json'
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_{}.h5'.format(year)

    # run reV 2.0 generation
    pp = ProjectPoints(rev2_points, sam_files, 'pvwattsv5',
                       res_file=res_file)
    gen = Gen.reV_run(tech='pvwattsv5', points=rev2_points,
                      sam_files=sam_files, res_file=res_file,
                      max_workers=max_workers, sites_per_worker=3,
                      fout=None)
    gen_outs = list(gen.out['cf_mean'])

    # initialize the rev1 output hander
    with pv_results(rev1_outs) as pv:
        # get reV 1.0 results
        cf_mean_list = pv.get_cf_mean(pp.sites, year)

    # benchmark the results; plain truthiness assert with a message
    # replaces the non-idiomatic `assert result is True`
    msg = 'PV cf_mean results did not match reV 1.0 results!'
    assert np.allclose(gen_outs, cf_mean_list, rtol=RTOL, atol=ATOL), msg
def test_PV_lat_tilt(res, site_index):
    """Test the method to set tilt based on latitude."""
    rev2_points = TESTDATADIR + '/project_points/ri.csv'
    sam_files = [TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json',
                 TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json']
    sam_files = {'sam_param_{}'.format(i): k
                 for i, k in enumerate(sam_files)}
    pp = ProjectPoints(rev2_points, sam_files, 'pv')

    # iterate through requested sites until the requested index is found
    # (dead `else: pass` removed; unused `config` local replaced with `_`)
    for i, [res_df, meta] in enumerate(res):
        if i == site_index:
            # get SAM inputs from project_points based on the current site
            site = res_df.name
            _, inputs = pp[site]
            inputs['tilt'] = 'latitude'
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                sim = Pvwattsv5(resource=res_df, meta=meta,
                                sam_sys_inputs=inputs,
                                output_request=('cf_mean', ))
            break

    # NOTE(review): if site_index is never reached, `sim`/`meta` are
    # unbound here and this raises NameError — presumably the fixture
    # guarantees the index exists; confirm.
    assert sim.sam_sys_inputs['tilt'] == meta['latitude']
def get_node_pc(points, sam_files, nodes):
    """Get a PointsControl object to be send to HPC nodes.

    Parameters
    ----------
    points : slice | str | list | tuple
        Slice/list specifying project points, string pointing to a
        project points csv.
    sam_files : dict | str | list
        SAM input configuration ID(s) and file path(s). Keys are the SAM
        config ID(s), top level value is the SAM path. Can also be a
        single config file str. If it's a list, it is mapped to the
        sorted list of unique configs requested by points csv.
    nodes : int
        Number of nodes that the PointsControl object is being split to.

    Returns
    -------
    pc : reV.config.project_points.PointsControl
        PointsControl object to be iterated and send to HPC nodes.
    """
    # guard clause: reject anything that cannot describe project points
    if not isinstance(points, (str, slice, list, tuple)):
        raise TypeError('Econ Points input type is unrecognized: '
                        '"{}"'.format(type(points)))

    # create points control via points, one even split per node
    project_points = ProjectPoints(points, sam_files, tech=None)
    per_node = ceil(len(project_points) / nodes)
    return PointsControl(project_points, sites_per_split=per_node)
def parse_points_control(self):
    """Get the generation points control object.

    Lazily builds and caches a PointsControl on first access; subsequent
    calls return the cached instance.

    Returns
    -------
    points_control : reV.config.project_points.PointsControl
        PointsControl object based on specified project points and
        execution control option.
    """
    if self._pc is None:
        # make an instance of project points
        pp = ProjectPoints(self.project_points, self['sam_files'],
                           tech=self.technology)

        if (self.execution_control.option == 'peregrine'
                or self.execution_control.option == 'eagle'):
            # sites per split on peregrine or eagle is the number of sites
            # in project points / number of nodes. This is for the initial
            # division of the project sites between HPC nodes (jobs)
            sites_per_worker = ceil(len(pp)
                                    / self.execution_control.nodes)
        elif self.execution_control.option == 'local':
            # sites per split on local is number of sites / # of processes
            sites_per_worker = ceil(len(pp)
                                    / self.execution_control.max_workers)
        # NOTE(review): if execution_control.option is anything other than
        # 'peregrine', 'eagle', or 'local', sites_per_worker is never bound
        # and the call below raises NameError — presumably upstream config
        # validation restricts the option values; confirm.

        # make an instance of points control and set to protected attribute
        self._pc = PointsControl(pp, sites_per_split=sites_per_worker)

    return self._pc
def test_wind_gen_slice(f_rev1_out, rev2_points, year, max_workers):
    """Test reV 2.0 generation for wind and benchmark against reV 1.0
    results."""
    # (docstring previously said "PV" — copy/paste error fixed)
    # get full file paths.
    rev1_outs = os.path.join(TESTDATADIR, 'ri_wind', 'scalar_outputs',
                             f_rev1_out)
    sam_files = TESTDATADIR + '/SAM/wind_gen_standard_losses_0.json'
    res_file = TESTDATADIR + '/wtk/ri_100_wtk_{}.h5'.format(year)

    # run reV 2.0 generation
    pp = ProjectPoints(rev2_points, sam_files, 'windpower',
                       res_file=res_file)
    gen = Gen.reV_run('windpower', rev2_points, sam_files, res_file,
                      max_workers=max_workers, sites_per_worker=3,
                      fout=None)
    gen_outs = list(gen.out['cf_mean'])

    # initialize the rev1 output hander
    with wind_results(rev1_outs) as wind:
        # get reV 1.0 results
        cf_mean_list = wind.get_cf_mean(pp.sites, year)

    # benchmark the results; plain truthiness assert replaces the
    # non-idiomatic `assert result is True, msg`
    msg = 'Wind cf_means results did not match reV 1.0 results!'
    assert np.allclose(gen_outs, cf_mean_list, rtol=RTOL, atol=ATOL), msg
def from_lat_lons(ctx, lat_lon_fpath, lat_lon_coords):
    """Convert latitude and longitude coordinates to ProjectPoints"""
    out_fpath = ctx.obj['FPATH']
    coords = _parse_lat_lons(lat_lon_fpath, lat_lon_coords)
    logger.info('Creating ProjectPoints from {} and saving to {}'
                .format(coords, out_fpath))
    points = ProjectPoints.lat_lon_coords(coords, ctx.obj['RES_FILE'],
                                          ctx.obj['SAM_FILE'])
    # persist the resolved points dataframe to the requested csv path
    points.df.to_csv(out_fpath)
def from_regions(ctx, regions, region, region_col):
    """Extract ProjectPoints for given geographic regions"""
    out_fpath = ctx.obj['FPATH']
    region_map = _parse_regions(regions, region, region_col)
    logger.info('Creating ProjectPoints from {} and saving to {}'
                .format(region_map, out_fpath))
    points = ProjectPoints.regions(region_map, ctx.obj['RES_FILE'],
                                   ctx.obj['SAM_FILE'])
    # persist the resolved points dataframe to the requested csv path
    points.df.to_csv(out_fpath)
def test_duplicate_coords():
    """
    Test ProjectPoint.lat_lon_coords duplicate coords error
    """
    res_file = os.path.join(TESTDATADIR, 'nsrdb/', 'ri_100_nsrdb_2012.h5')
    sam_files = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13_cs.json')

    with Resource(res_file) as res:
        meta = res.meta

    # gid 3 is requested twice -> duplicate coordinates must raise
    dup_coords = meta.loc[[2, 3, 3, 4], ['latitude', 'longitude']].values
    with pytest.raises(RuntimeError):
        ProjectPoints.lat_lon_coords(dup_coords, res_file, sam_files)

    # regions that resolve to overlapping gids must raise as well
    region_map = {'Kent': 'county', 'Rhode Island': 'state'}
    with pytest.raises(RuntimeError):
        ProjectPoints.regions(region_map, res_file, sam_files)
def test_proj_points_split(start, interval):
    """Test the split operation of project points."""
    res_file = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
    sam_files = os.path.join(TESTDATADIR,
                             'SAM/wind_gen_standard_losses_0.json')
    master = ProjectPoints(slice(start, 100, interval), sam_files,
                           'windpower', res_file=res_file)

    chunk = 5
    msg = 'ProjectPoints split did not function correctly!'

    # walk full-sized chunks only; a trailing partial chunk is skipped,
    # matching the original for/break behavior
    lo = 0
    while lo + chunk <= len(master):
        hi = lo + chunk
        sub = ProjectPoints.split(lo, hi, master)
        assert sub.sites == master.sites[lo:hi], msg
        assert all(sub.df == master.df.iloc[lo:hi]), msg
        lo += chunk
def test_clearsky():
    """
    Test Clearsky
    """
    res_file = os.path.join(TESTDATADIR, 'nsrdb/', 'ri_100_nsrdb_2012.h5')
    sam_files = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13_cs.json')
    points = ProjectPoints(slice(0, 10), sam_files, 'pvwattsv5',
                           res_file=res_file)

    # fetching the SAM resource object is expected to fail here
    with pytest.raises(ResourceRuntimeError):
        RevPySam.get_sam_res(res_file, points, points.tech)
def test_sam_config_kw_replace():
    """Test that the SAM config with old keys from pysam v1 gets updated
    on the fly and gets propagated to downstream splits."""
    fpp = os.path.join(TESTDATADIR, 'project_points/pp_offshore.csv')
    sam_files = {'onshore': os.path.join(
        TESTDATADIR, 'SAM/wind_gen_standard_losses_0.json'),
        'offshore': os.path.join(
            TESTDATADIR, 'SAM/wind_gen_standard_losses_1.json')}
    res_file = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')

    pp = ProjectPoints(fpp, sam_files, 'windpower')
    pc = PointsControl(pp, sites_per_split=100)
    gen = Gen(pc, res_file)

    # top-level parsed configs should carry the updated key
    config_on = gen.project_points.sam_configs['onshore']
    config_of = gen.project_points.sam_configs['offshore']
    assert 'turb_generic_loss' in config_on
    assert 'turb_generic_loss' in config_of

    # a ProjectPoints split must carry the updated configs forward
    pp_split = ProjectPoints.split(0, 10000, gen.project_points)
    config_on = pp_split.sam_configs['onshore']
    config_of = pp_split.sam_configs['offshore']
    assert 'turb_generic_loss' in config_on
    assert 'turb_generic_loss' in config_of

    # a PointsControl split must carry the updated configs forward
    pc_split = PointsControl.split(0, 10000, gen.project_points)
    config_on = pc_split.project_points.sam_configs['onshore']
    config_of = pc_split.project_points.sam_configs['offshore']
    assert 'turb_generic_loss' in config_on
    assert 'turb_generic_loss' in config_of

    # and every iterated sub-split as well
    for ipc in pc_split:
        if 'onshore' in ipc.project_points.sam_configs:
            config = ipc.project_points.sam_configs['onshore']
            assert 'turb_generic_loss' in config

        if 'offshore' in ipc.project_points.sam_configs:
            config = ipc.project_points.sam_configs['offshore']
            assert 'turb_generic_loss' in config
def res():
    """Initialize a SAM resource object to test SAM functions on."""
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_2012.h5'
    rev2_points = TESTDATADIR + '/project_points/ri.csv'

    # two identical SAM configs keyed 'sam_param_0' / 'sam_param_1'
    sam_path = TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json'
    sam_files = {'sam_param_{}'.format(i): fp
                 for i, fp in enumerate((sam_path, sam_path))}

    pp = ProjectPoints(rev2_points, sam_files, 'pv')
    return NSRDB.preload_SAM(res_file, pp.sites)
def test_config_mapping():
    """Test the mapping of multiple configs in the project points."""
    fpp = os.path.join(TESTDATADIR, 'project_points/pp_offshore.csv')
    sam_files = {'onshore': os.path.join(
        TESTDATADIR, 'SAM/wind_gen_standard_losses_0.json'),
        'offshore': os.path.join(
            TESTDATADIR, 'SAM/wind_gen_standard_losses_1.json')}
    df = pd.read_csv(fpp, index_col=0)

    pp = ProjectPoints(fpp, sam_files, 'windpower')
    pc = PointsControl(pp, sites_per_split=100)

    # each site's mapped config id must match the csv input
    # (the unused `enumerate` index on the outer loop was removed)
    for pc_split in pc:
        for site in pc_split.sites:
            cid = pc_split.project_points[site][0]
            assert cid == df.loc[site].values[0]
def test_regions(counties):
    """
    Test ProjectPoint.regions class method
    """
    res_file = os.path.join(TESTDATADIR, 'nsrdb/', 'ri_100_nsrdb_2012.h5')
    sam_files = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13_cs.json')

    with Resource(res_file) as res:
        meta = res.meta

    # expected gids: every meta row whose county is in the request
    expected = meta.loc[meta['county'].isin(counties)].index.values.tolist()

    region_map = dict.fromkeys(counties, 'county')
    points = ProjectPoints.regions(region_map, res_file, sam_files)
    assert sorted(expected) == points.sites
def test_proj_control_iter(start, interval):
    """Test the iteration of the points control."""
    sites_per_split = 3
    res_file = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
    sam_files = os.path.join(TESTDATADIR,
                             'SAM/wind_gen_standard_losses_0.json')
    pp = ProjectPoints(slice(start, 100, interval), sam_files, 'windpower',
                       res_file=res_file)
    pc = PointsControl(pp, sites_per_split=sites_per_split)

    msg = 'PointsControl iterator split did not function correctly!'
    for idx, split_ctrl in enumerate(pc):
        # nominal row window for this split of the master dataframe
        lo = idx * sites_per_split
        hi = lo + sites_per_split
        chunk = split_ctrl.project_points.df
        expected = pp.df.iloc[lo:hi, :]
        assert all(chunk == expected), msg
def test_coords(sites):
    """
    Test ProjectPoint.lat_lon_coords class method
    """
    res_file = os.path.join(TESTDATADIR, 'nsrdb/', 'ri_100_nsrdb_2012.h5')
    sam_files = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13_cs.json')

    with Resource(res_file) as res:
        meta = res.meta

    # draw a random sample of unique gids from the resource meta
    picked = np.random.choice(meta.index.values, sites,
                              replace=False).tolist()
    if not isinstance(picked, list):
        picked = [picked]

    coords = meta.loc[picked, ['latitude', 'longitude']].values
    points = ProjectPoints.lat_lon_coords(coords, res_file, sam_files)
    assert sorted(picked) == points.sites
def get_curtailment(year):
    """Get the curtailed and non-curtailed resource objects, and project points
    """
    res_file = os.path.join(TESTDATADIR, 'wtk/',
                            'ri_100_wtk_{}.h5'.format(year))
    sam_files = os.path.join(TESTDATADIR,
                             'SAM/wind_gen_standard_losses_0.json')
    curtailment = os.path.join(TESTDATADIR, 'config/', 'curtailment.json')

    points = ProjectPoints(slice(0, 100), sam_files, 'windpower',
                           curtailment=curtailment)

    resource = RevPySam.get_sam_res(res_file, points, 'windpower')
    # snapshot the resource before applying curtailment so callers can
    # compare curtailed vs. non-curtailed values
    non_curtailed_res = deepcopy(resource)

    curtailed = curtail(resource, points.curtailment, random_seed=0)
    return curtailed, non_curtailed_res, points
def test_pv_name_error():
    """Test that reV gen raises a KeyError for an unrecognized tech name."""
    year = 2012
    rev2_points = slice(0, 3)
    sam_files = TESTDATADIR + '/SAM/naris_pv_1axis_inv13.json'
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_{}.h5'.format(year)

    # run reV 2.0 generation with tech='pv', which this test expects to
    # be rejected with a KeyError
    with pytest.raises(KeyError) as record:
        pp = ProjectPoints(rev2_points, sam_files, 'pv', res_file=res_file)
        Gen.reV_run(tech='pv', points=rev2_points, sam_files=sam_files,
                    res_file=res_file, max_workers=1, sites_per_worker=1,
                    fout=None)
        # NOTE(review): this assert is inside the pytest.raises block, so
        # it never executes once the KeyError is raised above; it also
        # indexes the ExceptionInfo (`record[0].message`) rather than
        # using `str(record.value)` — presumably it should be dedented
        # and rewritten; confirm intent before changing test coverage.
        assert 'Did not recognize' in record[0].message
def test_gen_input_mods():
    """Test that the gen workers do not modify the top level input SAM config
    """
    year = 2012
    rev2_points = slice(0, 5)
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_{}.h5'.format(year)
    sam_files = TESTDATADIR + '/SAM/i_pvwatts_fixed_lat_tilt.json'

    # run reV 2.0 generation
    # (an unused `pp = ProjectPoints(...)` local was removed)
    gen = Gen.reV_run(tech='pvwattsv7', points=rev2_points,
                      sam_files=sam_files, res_file=res_file,
                      max_workers=1, sites_per_worker=1, fout=None)

    # every site's stored SAM inputs should still carry the raw
    # 'latitude' tilt placeholder, not a computed numeric tilt
    for i in range(5):
        inputs = gen.project_points[i][1]
        assert inputs['tilt'] == 'latitude'
def test_pvwattsv7_baseline():
    """Test reV pvwattsv7 generation against baseline data"""
    baseline_cf_mean = np.array([151, 151, 157]) / 1000

    year = 2012
    rev2_points = slice(0, 3)
    res_file = TESTDATADIR + '/nsrdb/ri_100_nsrdb_{}.h5'.format(year)
    sam_files = TESTDATADIR + '/SAM/i_pvwattsv7.json'

    # run reV 2.0 generation
    # (an unused `pp = ProjectPoints(...)` local was removed)
    gen = Gen.reV_run(tech='pvwattsv7', points=rev2_points,
                      sam_files=sam_files, res_file=res_file,
                      max_workers=1, sites_per_worker=1, fout=None)

    msg = ('PVWattsv7 cf_mean results {} did not match baseline: {}'.format(
        gen.out['cf_mean'], baseline_cf_mean))
    assert np.allclose(gen.out['cf_mean'], baseline_cf_mean), msg
def test_split_iter():
    """Test Points_Control on two slices of ProjectPoints"""
    res_file = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
    sam_files = os.path.join(TESTDATADIR,
                             'SAM/wind_gen_standard_losses_0.json')
    points = ProjectPoints(slice(0, 500, 5), sam_files, 'windpower',
                           res_file=res_file)

    chunk = 3
    msg = 'PointsControl iterator split did not function correctly!'
    for start, stop in [(0, 50), (50, 100)]:
        ctrl = PointsControl.split(start, stop, points,
                                   sites_per_split=chunk)
        for k, sub in enumerate(ctrl):
            # nominal window, clamped to the slice's stop boundary
            lo = start + k * chunk
            hi = min(lo + chunk, stop)
            assert sub.project_points.df.equals(points.df.iloc[lo:hi]), msg
def _pp_to_pc(points, points_range, sam_files, tech,
              sites_per_worker=None, res_file=None, curtailment=None):
    """
    Create ProjectControl from ProjectPoints

    Parameters
    ----------
    points : slice | list | str | reV.config.project_points.PointsControl
        Slice specifying project points, or string pointing to a project
        points csv, or a fully instantiated PointsControl object.
    points_range : list | None
        Optional two-entry list specifying the index range of the sites to
        analyze. To be taken from the reV.config.PointsControl.split_range
        property.
    sam_files : dict | str | list | SAMConfig
        SAM input configuration ID(s) and file path(s). Keys are the SAM
        config ID(s), top level value is the SAM path. Can also be a single
        config file str. If it's a list, it is mapped to the sorted list
        of unique configs requested by points csv. Can also be a pre loaded
        SAMConfig object.
    tech : str
        SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt,
        solarwaterheat, troughphysicalheat, lineardirectsteam)
        The string should be lower-cased with spaces and _ removed.
    sites_per_worker : int
        Number of sites to run in series on a worker. None defaults to the
        resource file chunk size.
    res_file : str
        Filepath to single resource file, multi-h5 directory,
        or /h5_dir/prefix*suffix
    curtailment : NoneType | dict | str | config.curtailment.Curtailment
        Inputs for curtailment parameters. If not None, curtailment inputs
        are expected. Can be:
            - Explicit namespace of curtailment variables (dict)
            - Pointer to curtailment config json file with path (str)
            - Instance of curtailment config object
              (config.curtailment.Curtailment)

    Returns
    -------
    pc : reV.config.project_points.PointsControl
        PointsControl object instance.
    """
    if isinstance(points, ProjectPoints):
        # rebuild ProjectPoints from the existing points dataframe
        pp = ProjectPoints(points.df, sam_files, tech=tech,
                           res_file=res_file, curtailment=curtailment)
    else:
        # make a ProjectPoints instance from the raw points input
        pp = ProjectPoints(points, sam_files, tech=tech,
                           res_file=res_file, curtailment=curtailment)

    if points_range is None:
        # PointsControl is for all of the project points
        return PointsControl(pp, sites_per_split=sites_per_worker)

    # PointsControl is for just a subset of the project points...
    # this is the case if generation is being initialized on one
    # of many HPC nodes in a large project
    return PointsControl.split(points_range[0], points_range[1], pp,
                               sites_per_split=sites_per_worker)