def prj(request):
    if request.param == "arctic":
        # arctic backend: reuse the test library if it already exists in the
        # local MongoDB instance, otherwise build it from the test data
        connstr = "mongodb://localhost:27017/"
        name = "test_project"
        arc = arctic.Arctic(connstr)
        if name in [lib.split(".")[0] for lib in arc.list_libraries()]:
            connector = pst.ArcticConnector(name, connstr)
            prj = pst.PastaStore(name, connector)
        else:
            connector = pst.ArcticConnector(name, connstr)
            prj = initialize_project(connector)
    elif request.param == "pystore":
        # pystore backend: reuse the store on disk if present
        name = "test_project"
        path = "./tests/data/pystore"
        pystore.set_path(path)
        if name in pystore.list_stores():
            connector = pst.PystoreConnector(name, path)
            prj = pst.PastaStore(name, connector)
        else:
            connector = pst.PystoreConnector(name, path)
            prj = initialize_project(connector)
    elif request.param == "dict":
        # in-memory backend: always built from scratch
        name = "test_project"
        connector = pst.DictConnector(name)
        prj = initialize_project(connector)
    prj.type = request.param  # added here for defining test dependencies
    yield prj
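# Note: ``prj`` is used as a parametrized pytest fixture (it reads
# ``request.param`` and yields). The decorator is not part of this excerpt; a
# minimal sketch of how it is presumably declared, with the parameter values
# inferred from the branches above:
#
# @pytest.fixture(params=["arctic", "pystore", "dict"])
# def prj(request):
#     ...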
def initialize_project(conn):
    prj = pst.PastaStore("test_project", conn)
    # oseries 1
    o = pd.read_csv("./tests/data/obs.csv", index_col=0, parse_dates=True)
    o.index.name = "oseries1"
    prj.conn.add_oseries(o, "oseries1", metadata={"x": 100000, "y": 400000})
    # oseries 2
    o = pd.read_csv("./tests/data/head_nb1.csv", index_col=0, parse_dates=True)
    o.index.name = "oseries2"
    prj.conn.add_oseries(o, "oseries2", metadata={"x": 100300, "y": 400400})
    # prec 1
    s = pd.read_csv("./tests/data/rain.csv", index_col=0, parse_dates=True)
    prj.conn.add_stress(s, "prec1", kind="prec",
                        metadata={"x": 100000, "y": 400000})
    # prec 2
    s = pd.read_csv("./tests/data/rain_nb1.csv", index_col=0, parse_dates=True)
    prj.conn.add_stress(s, "prec2", kind="prec",
                        metadata={"x": 100300, "y": 400400})
    # evap 1
    s = pd.read_csv("./tests/data/evap.csv", index_col=0, parse_dates=True)
    prj.conn.add_stress(s, "evap1", kind="evap",
                        metadata={"x": 100000, "y": 400000})
    # evap 2
    s = pd.read_csv("./tests/data/evap_nb1.csv", index_col=0, parse_dates=True)
    prj.conn.add_stress(s, "evap2", kind="evap",
                        metadata={"x": 100300, "y": 400400})
    return prj
def create_pastastore(oc, pstore, pstore_name='',
                      conn=pst.DictConnector("my_conn"),
                      add_metadata=True,
                      obs_column='stand_m_tov_nap',
                      kind='oseries',
                      verbose=False):
    """Add observations to a new or existing PastaStore.

    Parameters
    ----------
    oc : observation.ObsCollection
        collection of observations
    pstore : pastastore.PastaStore, optional
        existing PastaStore, if None a new PastaStore is created
    pstore_name : str, optional
        name of the PastaStore, only used if pstore is None
    conn : pastastore.connectors
        connector for the database
    add_metadata : bool, optional
        if True, metadata from the observations is added to the PastaStore
    obs_column : str, optional
        name of the column in the Obs dataframe to be used
    kind : str, optional
        the kind of series that is added to the PastaStore
    verbose : bool, optional
        print additional information to the screen (default is False)

    Returns
    -------
    pstore : pastastore.PastaStore
        the PastaStore with the series from the ObsCollection
    """
    if pstore is None:
        pstore = pst.PastaStore(pstore_name, connector=conn)

    for o in oc.obs.values:
        if verbose:
            print('add to pastastore -> {}'.format(o.name))
        if add_metadata:
            meta = _get_metadata_from_obs(o, verbose=verbose)
        else:
            meta = dict()
        if kind == 'oseries':
            pstore.conn.add_oseries(o[obs_column], o.name, metadata=meta)
        else:
            pstore.conn.add_stress(o[obs_column], o.name, kind, metadata=meta)

    return pstore
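# Usage sketch for ``create_pastastore`` (illustrative, not from the source):
# assumes ``oc`` is an existing hydropandas ObsCollection whose Obs dataframes
# contain a 'stand_m_tov_nap' column; the connector name "obs_db" and store
# name "from_obscollection" are hypothetical.
#
# conn = pst.DictConnector("obs_db")
# pstore = create_pastastore(oc, pstore=None, pstore_name="from_obscollection",
#                            conn=conn, obs_column='stand_m_tov_nap',
#                            kind='oseries', verbose=True)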
def build_model(conn):
    store = pst.PastaStore("test", conn)
    # oseries nb1
    if "oseries_nb1" not in store.oseries.index:
        o = pd.read_csv("./tests/data/head_nb1.csv", index_col=0,
                        parse_dates=True)
        store.add_oseries(o, "oseries_nb1",
                          metadata={"x": 100300, "y": 400400})
    # prec nb1
    if "prec_nb1" not in store.stresses.index:
        s = pd.read_csv("./tests/data/rain_nb1.csv", index_col=0,
                        parse_dates=True)
        store.add_stress(s, "prec_nb1", kind="prec",
                         metadata={"x": 100300, "y": 400400})
    # evap nb1
    if "evap_nb1" not in store.stresses.index:
        s = pd.read_csv("./tests/data/evap_nb1.csv", index_col=0,
                        parse_dates=True)
        store.add_stress(s, "evap_nb1", kind="evap",
                         metadata={"x": 100300, "y": 400400})
    ml = store.create_model("oseries_nb1", add_recharge=True)
    return ml
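# Usage sketch for ``build_model`` (illustrative): create the model from the
# test data above and calibrate it with pastas; assumes the ./tests/data csv
# files are available (run from the repository root). The connector name is
# hypothetical.
#
# ml = build_model(pst.DictConnector("build_model_example"))
# ml.solve(report=False)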
def example_pastastore(conn="DictConnector"):
    """Example dataset loaded into PastaStore.

    Parameters
    ----------
    conn : str or Connector, optional
        name of the Connector type, by default "DictConnector", which
        initializes a default Connector. If a Connector instance is passed,
        that Connector is used.

    Returns
    -------
    pstore : pastastore.PastaStore
        PastaStore containing example dataset
    """
    # check if test dataset is available
    datadir = os.path.join(os.path.dirname(__file__), "../tests/data")
    if not os.path.exists(datadir):
        raise FileNotFoundError("Test datasets not available! "
                                "Clone repository from GitHub.")

    # initialize default connector if conn is a string
    if not isinstance(conn, BaseConnector):
        conn = _default_connector(conn)

    # initialize PastaStore
    pstore = pst.PastaStore("example", conn)

    # add data
    # oseries 1
    o = pd.read_csv(os.path.join(datadir, "obs.csv"),
                    index_col=0, parse_dates=True)
    pstore.add_oseries(o, "oseries1", metadata={"x": 165000, "y": 424000})
    # oseries 2
    o = pd.read_csv(os.path.join(datadir, "head_nb1.csv"),
                    index_col=0, parse_dates=True)
    pstore.add_oseries(o, "oseries2", metadata={"x": 164000, "y": 423000})
    # oseries 3
    o = pd.read_csv(os.path.join(datadir, "gw_obs.csv"),
                    index_col=0, parse_dates=True)
    pstore.add_oseries(o, "oseries3", metadata={"x": 165554, "y": 422685})
    # prec 1
    s = pd.read_csv(os.path.join(datadir, "rain.csv"),
                    index_col=0, parse_dates=True)
    pstore.add_stress(s, "prec1", kind="prec",
                      metadata={"x": 165050, "y": 424050})
    # prec 2
    s = pd.read_csv(os.path.join(datadir, "rain_nb1.csv"),
                    index_col=0, parse_dates=True)
    pstore.add_stress(s, "prec2", kind="prec",
                      metadata={"x": 164010, "y": 423000})
    # evap 1
    s = pd.read_csv(os.path.join(datadir, "evap.csv"),
                    index_col=0, parse_dates=True)
    pstore.add_stress(s, "evap1", kind="evap",
                      metadata={"x": 164500, "y": 424000})
    # evap 2
    s = pd.read_csv(os.path.join(datadir, "evap_nb1.csv"),
                    index_col=0, parse_dates=True)
    pstore.add_stress(s, "evap2", kind="evap",
                      metadata={"x": 164000, "y": 423030})
    # well 1
    s = pd.read_csv(os.path.join(datadir, "well.csv"),
                    index_col=0, parse_dates=True)
    pstore.add_stress(s, "well1", kind="well",
                      metadata={"x": 164691, "y": 423579})

    # river notebook data (nb5)
    oseries = pd.read_csv(os.path.join(datadir, "nb5_head.csv"),
                          parse_dates=True, index_col=0).squeeze("columns")
    pstore.add_oseries(oseries, "head_nb5",
                       metadata={"x": 200_000, "y": 450_000.})
    rain = pd.read_csv(os.path.join(datadir, "nb5_prec.csv"),
                       parse_dates=True, index_col=0).squeeze("columns")
    pstore.add_stress(rain, "prec_nb5", kind="prec",
                      metadata={"x": 200_000, "y": 450_000.})
    evap = pd.read_csv(os.path.join(datadir, "nb5_evap.csv"),
                       parse_dates=True, index_col=0).squeeze("columns")
    pstore.add_stress(evap, "evap_nb5", kind="evap",
                      metadata={"x": 200_000, "y": 450_000.})
    waterlevel = pd.read_csv(os.path.join(datadir, "nb5_riv.csv"),
                             parse_dates=True, index_col=0).squeeze("columns")
    pstore.add_stress(waterlevel, "riv_nb5", kind="riv",
                      metadata={"x": 200_000, "y": 450_000.})

    # multiwell notebook data
    fname = os.path.join(datadir, 'MenyanthesTest.men')
    meny = ps.read.MenyData(fname)

    # NB: 'Obsevation well' is the key used in the test dataset
    oseries = meny.H['Obsevation well']['values'].dropna()
    ometa = {"x": meny.H["Obsevation well"]['xcoord'],
             "y": meny.H["Obsevation well"]['ycoord']}
    pstore.add_oseries(oseries, "head_mw", metadata=ometa)

    prec = meny.IN['Precipitation']['values']
    prec.index = prec.index.round("D")
    prec.name = "prec"
    pmeta = {"x": meny.IN['Precipitation']['xcoord'],
             "y": meny.IN['Precipitation']['ycoord']}
    pstore.add_stress(prec, "prec_mw", kind="prec", metadata=pmeta)

    evap = meny.IN['Evaporation']['values']
    evap.index = evap.index.round("D")
    evap.name = "evap"
    emeta = {"x": meny.IN['Evaporation']['xcoord'],
             "y": meny.IN['Evaporation']['ycoord']}
    pstore.add_stress(evap, "evap_mw", kind="evap", metadata=emeta)

    extraction_names = ['Extraction 2', 'Extraction 3']
    for extr in extraction_names:
        wmeta = {"x": meny.IN[extr]["xcoord"], "y": meny.IN[extr]["ycoord"]}
        # replace spaces in names for Pastas
        name = extr.replace(" ", "_").lower()
        ts = meny.IN[extr]["values"]
        pstore.add_stress(ts, name, kind="well", metadata=wmeta)

    return pstore
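# Usage sketch for ``example_pastastore`` (illustrative): load the example
# dataset into an in-memory connector and fit a model for one of the included
# oseries; ``create_model`` is the same PastaStore method used in
# ``build_model`` above.
#
# pstore = example_pastastore("DictConnector")
# ml = pstore.create_model("oseries1", add_recharge=True)
# ml.solve(report=False)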
def initialize_project(conn):
    pstore = pst.PastaStore("test_project", conn)
    # oseries 1
    o = pd.read_csv("./tests/data/obs.csv", index_col=0, parse_dates=True)
    pstore.add_oseries(o, "oseries1", metadata={"x": 165000, "y": 424000})
    # oseries 2
    o = pd.read_csv("./tests/data/head_nb1.csv", index_col=0, parse_dates=True)
    pstore.add_oseries(o, "oseries2", metadata={"x": 164000, "y": 423000})
    # oseries 3
    o = pd.read_csv("./tests/data/gw_obs.csv", index_col=0, parse_dates=True)
    pstore.add_oseries(o, "oseries3", metadata={"x": 165554, "y": 422685})
    # prec 1
    s = pd.read_csv("./tests/data/rain.csv", index_col=0, parse_dates=True)
    pstore.add_stress(s, "prec1", kind="prec",
                      metadata={"x": 165050, "y": 424050})
    # prec 2
    s = pd.read_csv("./tests/data/rain_nb1.csv", index_col=0, parse_dates=True)
    pstore.add_stress(s, "prec2", kind="prec",
                      metadata={"x": 164010, "y": 423000})
    # evap 1
    s = pd.read_csv("./tests/data/evap.csv", index_col=0, parse_dates=True)
    pstore.add_stress(s, "evap1", kind="evap",
                      metadata={"x": 164500, "y": 424000})
    # evap 2
    s = pd.read_csv("./tests/data/evap_nb1.csv", index_col=0, parse_dates=True)
    pstore.add_stress(s, "evap2", kind="evap",
                      metadata={"x": 164000, "y": 423030})
    # well 1
    s = pd.read_csv("./tests/data/well.csv", index_col=0, parse_dates=True)
    pstore.add_stress(s, "well1", kind="well",
                      metadata={"x": 164691, "y": 423579})
    return pstore