def test_io_netcdf():
    """Round-trip a hydro object through netcdf and check the stats survive.

    Reads the fixture netcdf, writes it back to the same path, re-reads it,
    and asserts that more than 4 base-stat rows are present both times.
    """
    nc_path = path.join(py_dir, netcdf1)
    ## Initial read
    hobj = hydro().rd_netcdf(nc_path)
    hobj._base_stats_fun()
    assert len(hobj._base_stats) > 4
    ## Write back out, then confirm a fresh read still yields the same coverage
    hobj.to_netcdf(nc_path)
    hobj = hydro().rd_netcdf(nc_path)
    hobj._base_stats_fun()
    assert len(hobj._base_stats) > 4
def test_io_csv(csv):
    """Round-trip a hydro object through csv for one parametrized file.

    Parameters
    ----------
    csv : str
        File name of the csv fixture; read kwargs come from the module-level
        ``param`` dict keyed by this name.
    """
    read_kwargs = param[csv].copy()
    csv_path = path.join(py_dir, csv)
    ## Initial read
    hobj = hydro().rd_csv(csv_path, **read_kwargs)
    hobj._base_stats_fun()
    assert len(hobj._base_stats) > 4
    ## Write: 'long' format files go out un-pivoted, everything else pivoted
    pivot = read_kwargs['dformat'] != 'long'
    hobj.to_csv(csv_path, pivot=pivot)
    ## Re-read and confirm the stats coverage is preserved
    hobj = hydro().rd_csv(csv_path, **read_kwargs)
    hobj._base_stats_fun()
    assert len(hobj._base_stats) > 4
# NOTE(review): collapsed/truncated chunk — begins mid-docstring ("Created on ...")
# and ends mid-call (`h4 = h1.sel_ts_by_poly(poly_in,`). Left byte-identical:
# the missing context makes any reformatting or code change unsafe. Content
# visible here: module setup (imports, netcdf read into h1, polygon fixture)
# and the start of a parametrized test_sel_by_poly that selects by shapefile
# path or GeoDataFrame with a 100 m buffer.
Created on Thu Oct 26 15:34:32 2017 @author: MichaelEK """ from core.classes.hydro import hydro import pytest from os import path, getcwd from geopandas import read_file py_dir = path.realpath(path.join(getcwd(), path.dirname(__file__))) netcdf1 = 'test_netcdf1.nc' poly_shp = 'test_poly.shp' ## Read in data h1 = hydro().rd_netcdf(path.join(py_dir, netcdf1)) h1._base_stats_fun() stats = h1._base_stats ## Test selection options poly = read_file(path.join(py_dir, poly_shp)).set_index('site') @pytest.mark.parametrize('poly_in', [path.join(py_dir, poly_shp), poly]) def test_sel_by_poly(poly_in): h2 = h1.sel_ts_by_poly(poly_in, 100, pivot=True) assert (len(h2.columns) == 4) h3 = h1.sel_by_poly(poly_in, 100) h3._base_stats_fun() assert (len(h3._base_stats) == 4) h4 = h1.sel_ts_by_poly(poly_in,
# NOTE(review): collapsed script chunk from a flow-tools example/regression file.
# References names defined outside this view (mtypes1, sites1, qual_codes,
# intervals), so it is left byte-identical. Visible content: csv file-name
# constants, plotting parameters, an ECan server data pull into h1, and MALF /
# flow-stat calls (h1.stats, h1.malf7d with and without intervals).
malf_csv = 'malf.csv' alf_csv = 'alf.csv' days_mis_csv = 'alf_days_mis.csv' ## Plotting start = '1986-07-01' end = '1987-06-30' x_period = 'month' time_format = '%d-%m-%Y' flow_sites = 70105 ################################################ #### Import data h1 = hydro().get_data(mtypes=mtypes1, sites=sites1, qual_codes=qual_codes) ################################################ #### Tools ### Flow tools ## MALF and flow stats fstats = h1.stats(mtypes=mtypes1) fstats malf1 = h1.malf7d() malf1 malf3 = h1.malf7d(intervals=intervals) malf3
# NOTE(review): collapsed/truncated chunk — ends mid-call
# (`h4 = hydro().get_data(mtypes=[mtypes1, mtypes2],`), so it is left
# byte-identical. Visible content: parameter constants (mtypes, site lists,
# quality codes, date range, a hard-coded network shapefile path) and a series
# of hydro().get_data pulls for flow, gwl, usage, and telemetered data.
# References mtypes1-3 defined outside this view.
mtypes4 = 'gwl' mtypes5 = 'gwl_m' mtypes6 = 'usage' mtypes7 = 'flow_tel' sites1 = [70105, 69607, 69602, 65101, 69505] sites2 = [66, 137] sites3 = ['BT27/5020'] sites4 = ['J38/0774', 'J38/0874', 'J38/0811', 'I39/0033'] qual_codes = [10, 18, 20, 30, 50] from_date = '2015-01-01' to_date = '2017-06-30' poly = r'S:\Surface Water\backups\MichaelE\Projects\otop\GIS\vector\min_flow\catch1.shp' ### From the MSSQL server (the easy way) - Loads in both the time series data and the geo locations h1 = hydro().get_data(mtypes=mtypes1, sites=sites1, qual_codes=qual_codes) h2 = h1.get_data(mtypes=mtypes2, sites=sites2, qual_codes=qual_codes) h3 = h2.get_data(mtypes=mtypes3, sites=sites1, qual_codes=qual_codes) gwl1 = hydro().get_data(mtypes=mtypes4, sites=sites3, qual_codes=qual_codes) gwl2 = hydro().get_data(mtypes=mtypes5, sites=sites3) use1 = hydro().get_data(mtypes=mtypes6, sites=sites4) tel1 = hydro().get_data(mtypes=mtypes7, sites=sites1, from_date=from_date, to_date=to_date) ## Find sites based on a polygon shapefile with a 100 m buffer distance (for m_flow) h4 = hydro().get_data(mtypes=[mtypes1, mtypes2],
# NOTE(review): collapsed script chunk from a gwl load example. Left
# byte-identical because the module-level names it binds (h1, h2, h4) are
# presumably used by code outside this view — TODO confirm. Visible content:
# parameter constants, two hydro().get_data pulls chained (h2 extends h1), a
# polygon-buffered site selection (10 m) into h4, and prints of the objects.
#### Load data ### Parameters mtypes1 = 'gwl_m' mtypes2 = 'gwl' sites1 = ['K37/3556'] sites2 = ['L36/0633'] qual_codes = [10, 18, 20, 50] from_date = '2015-01-01' to_date = '2017-06-30' poly = r'P:\examples\regression_tests\ashburton.shp' ### From the MSSQL server (the easy way) - Loads in both the time series data and the geo locations h1 = hydro().get_data(mtypes=mtypes1, sites=sites2) h2 = h1.get_data(mtypes=mtypes2, sites=sites2, qual_codes=qual_codes) ## Find sites based on a polygon shapefile with a 10 m buffer distance h4 = h1.get_data(mtypes=mtypes2, sites=poly, buffer_dis=10, qual_codes=qual_codes) ################################################## #### Look at the attributes and data contained in the new object ## Look at the general stats of the imported data print(h1) print(h2) h2
# NOTE(review): collapsed/truncated chunk — begins mid-function: the opening
# `out_param = {}` / `dformat` logic is the orphaned tail of a csv round-trip
# test whose header (and the `tparam`/`dformat` bindings) lie outside this
# view. Left byte-identical. Also visible: a base import of csv_files[0], a
# second read of extra_csv, and test_combine asserting the combined object's
# base stats equal the sum of the two inputs' counts.
out_param = {} if dformat == 'long': out_param.update({'pivot': False}) else: out_param.update({'pivot': True}) h1.to_csv(path.join(py_dir, csv), **out_param) ## Read h1 = hydro().rd_csv(path.join(py_dir, csv), **tparam) h1._base_stats_fun() assert (len(h1._base_stats) > 4) ## Base import tparam = param[csv_files[0]] h1 = hydro().rd_csv(path.join(py_dir, csv_files[0]), **tparam) h1._base_stats_fun() h1_len = len(h1._base_stats) ## Combine test h2 = hydro().rd_csv(path.join(py_dir, extra_csv), **tparam) h2._base_stats_fun() h2_len = len(h2._base_stats) def test_combine(): h3 = h1.combine(h2) h3._base_stats_fun() assert (len(h3._base_stats) == (h1_len + h2_len))
def test_ecan_get_data_atmos(mtypes):
    """Pull resampled data for one parametrized mtype and expect one stats row.

    Uses the module-level sites4 / date-range / qual_codes / min_count /
    resample_code parameters.
    """
    hobj = hydro().get_data(
        mtypes=mtypes,
        sites=sites4,
        from_date=from_date,
        to_date=to_date,
        qual_codes=qual_codes,
        min_count=min_count,
        resample_code=resample_code,
    )
    hobj._base_stats_fun()
    assert len(hobj._base_stats) == 1
def test_ecan_get_data_usage():
    """Pull usage data (mtypes3 at sites3, bounded by to_date) and expect one stats row."""
    hobj = hydro().get_data(
        mtypes=mtypes3,
        sites=sites3,
        to_date=to_date,
        qual_codes=qual_codes,
    )
    hobj._base_stats_fun()
    assert len(hobj._base_stats) == 1
def test_ecan_get_data_aq(mtypes):
    """Pull data for one parametrized mtype at sites2 over the module date range; expect one stats row."""
    hobj = hydro().get_data(
        mtypes=mtypes,
        sites=sites2,
        from_date=from_date,
        to_date=to_date,
        qual_codes=qual_codes,
    )
    hobj._base_stats_fun()
    assert len(hobj._base_stats) == 1