Example #1
def d3d(tmpdir, name):
    filename = (DATA_DIR / name).as_posix()
    # read meteo file
    df = pmeteo.meteo(meteo_source=filename, meteo_engine='cfgrib')

    rpath = str(tmpdir) + '/'
    # output to uvp files
    df.to_output(solver='d3d', rpath=rpath)

    # read again meteo
    m = pmodel(solver='d3d')

    p = m.from_force(rpath + 'p.amp', 'msl')
    u = m.from_force(rpath + 'u.amu', 'u10')
    v = m.from_force(rpath + 'v.amv', 'v10')

    dr = xr.merge([p, u, v])
    dr = dr.sortby('latitude', ascending=True)

    # compare
    df.Dataset = df.Dataset.sortby('latitude', ascending=True)

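    # the Delft3D uvp forcing files are plain-text with limited precision
    # (an assumption; the writer is not shown here), hence a tolerance
    # rather than exact equality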
    assert np.abs(df.Dataset.msl.values - dr.msl.values).max() < 1e-3
    assert np.abs(df.Dataset.u10.values - dr.u10.values).max() < 1e-3
    assert np.abs(df.Dataset.v10.values - dr.v10.values).max() < 1e-3
Example #2
def test_meteo(tmpdir, name):
    rpath = str(tmpdir) + "/"
    filename = (DATA_DIR / name).as_posix()
    d = pmeteo.meteo(filename, meteo_engine="netcdf")
    d.to_output(solver="schism", rpath=rpath, meteo_split_by="day")
    d.to_output(solver="schism", rpath=rpath, filename="all.nc")

    # read schism meteo files
    files = glob.glob(rpath + "sflux/*.nc")
    files.sort()
    ma = []
    for ifile in files:
        g = xr.open_dataset(ifile)
        ts = "-".join(g.time.attrs["base_date"].astype(str)[:3])
        time_r = pd.to_datetime(ts)
        times = time_r + pd.to_timedelta(g.time.values, unit="D").round("H")
        g = g.assign_coords({"time": times})
        ma.append(g)

    b = xr.merge(ma)
    b.close()

    tlist = pd.to_datetime(b.time.data) - pd.to_datetime(b.time.data[0])  # convert to Schism's time coords
    tlist = tlist / pd.to_timedelta("1D")

    b = b.assign_coords({"time": tlist})

    al = xr.open_dataset(rpath + "all.nc")

    assert b.equals(al)
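The time handling in the loop above deserves a note: sflux files store time as fractional days since a base_date attribute. A minimal standalone sketch of the same conversion, using hypothetical attribute values rather than a real sflux file:

import numpy as np
import pandas as pd

# hypothetical sflux base_date attribute: [year, month, day, hour]
base_date = np.array([2018, 10, 1, 0])
ts = "-".join(base_date.astype(str)[:3])  # "2018-10-1"
time_r = pd.to_datetime(ts)               # Timestamp("2018-10-01 00:00:00")

steps = np.array([0.0, 0.125, 0.25])      # fractional days since base_date
times = time_r + pd.to_timedelta(steps, unit="D").round("H")
print(times)  # 2018-10-01 00:00, 03:00 and 06:00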
Example #3
def test_meteo(tmpdir, name):
    rpath = str(tmpdir) + '/'
    filename = (DATA_DIR / name).as_posix()
    d = pmeteo.meteo(filename, meteo_engine='netcdf')
    d.to_output(solver='schism', rpath=rpath, meteo_split_by='day')
    d.to_output(solver='schism', rpath=rpath, filename='all.nc')

    # read schism meteo files
    files = glob.glob(rpath + 'sflux/*.nc')
    files.sort()
    ma = []
    for ifile in files:
        g = xr.open_dataset(ifile)
        ts = '-'.join(g.time.attrs['base_date'].astype(str)[:3])
        time_r = pd.to_datetime(ts)
        times = time_r + pd.to_timedelta(g.time.values, unit='D').round('H')
        g = g.assign_coords({'time': times})
        ma.append(g)

    b = xr.merge(ma)
    b.close()

    tlist = pd.to_datetime(b.time.data) - pd.to_datetime(b.time.data[0])  # convert to Schism's time coords
    tlist = tlist / pd.to_timedelta('1D')

    b = b.assign_coords({'time': tlist})

    al = xr.open_dataset(rpath + 'all.nc')

    assert b.equals(al)
Example #4
def d3d(tmpdir, name):
    filename = (DATA_DIR / name).as_posix()
    # read meteo file
    df = pmeteo.meteo(meteo_source=filename, meteo_engine="cfgrib")

    rpath = str(tmpdir) + "/"
    # output to uvp files
    df.to_output(solver="d3d", rpath=rpath)

    # read again meteo
    m = pmodel(solver="d3d")

    p = m.from_force(rpath + "p.amp", "msl")
    u = m.from_force(rpath + "u.amu", "u10")
    v = m.from_force(rpath + "v.amv", "v10")

    dr = xr.merge([p, u, v])
    dr = dr.sortby("latitude", ascending=True)

    # compare
    df.Dataset = df.Dataset.sortby("latitude", ascending=True)

    assert np.abs(df.Dataset.msl.values - dr.msl.values).max() < 1e-3
    assert np.abs(df.Dataset.u10.values - dr.u10.values).max() < 1e-3
    assert np.abs(df.Dataset.v10.values - dr.v10.values).max() < 1e-3
Example #5
def test_meteo(name):
    filename = (DATA_DIR / name).as_posix()
    try:
        pmeteo.meteo(filename, meteo_engine="netcdf")
        r = True
    except Exception:
        r = False

    assert r
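pytest already fails a test on any uncaught exception, so the try/except bookkeeping above can be dropped. A tighter sketch of the same smoke test, assuming the same fixtures and globals (the test name and DATA_DIR lookup are illustrative):

def test_meteo_opens(name):
    # any exception raised by pmeteo.meteo fails the test on its own
    filename = (DATA_DIR / name).as_posix()
    d = pmeteo.meteo(filename, meteo_engine="netcdf")
    assert d.Dataset is not None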
Example #6
    def force(self, **kwargs):

        meteo_source = get_value(self, kwargs, 'meteo_source', None)

        kwargs.update({'meteo_source': meteo_source})

        flag = get_value(self, kwargs, 'update', [])
        # check if files exist

        z = {**self.__dict__, **kwargs}  # merge self and possible kwargs

        if flag:
            if ('meteo' in flag) or ('all' in flag):
                self.meteo = pmeteo.meteo(**z)
            else:
                logger.info('skipping meteo files ..\n')
        else:
            self.meteo = pmeteo.meteo(**z)
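get_value is not defined in these snippets. A minimal sketch consistent with how it is called above (a hypothetical implementation, not necessarily pyposeidon's actual one):

def get_value(obj, kwargs, key, default=None):
    # prefer an explicit keyword argument, then an attribute already set
    # on the instance, and finally the supplied default
    if key in kwargs:
        return kwargs[key]
    return getattr(obj, key, default)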
Example #7
    def force(self, **kwargs):

        meteo_source = get_value(self, kwargs, "meteo_source", None)

        kwargs.update({"meteo_source": meteo_source})

        flag = get_value(self, kwargs, "update", [])
        # check if files exist

        z = {**self.__dict__, **kwargs}  # merge self and possible kwargs

        if flag:
            if ("meteo" in flag) | ("all" in flag):
                self.meteo = pmeteo.meteo(**z)
            else:
                logger.info("skipping meteo files ..\n")
        else:
            self.meteo = pmeteo.meteo(**z)
Example #8
def test_merge_strategy_last(meteo_paths, meteo_datasets):
    # In strategy "last" we want:
    # - the first 12 hours of all the datasets
    # - the rest of the hours of the last one
    expected = xr.concat(
        [
            *[ds.isel(time=slice(0, 12)) for ds in meteo_datasets],
            meteo_datasets[-1].isel(time=slice(12, None)),
        ],
        dim="time",
    )
    merged = pm.meteo(
        meteo_source=meteo_paths,
        meteo_engine="cfgrib",
        meteo_combine_by="nested",
        meteo_merge="last",
        meteo_xr_kwargs={"concat_dim": "step"},
    ).Dataset
    assert merged.equals(expected)
Example #9
def test_merge_strategy_first(meteo_paths, meteo_datasets):
    # In strategy "first" we want:
    # - the first 13 hours of the first meteo (indices 0-12)
    # - hours 1-12 of every subsequent meteo (hour 0 duplicates the previous issue time)
    # - hours 13-end of the last meteo
    expected = xr.concat(
        [
            meteo_datasets[0].isel(time=slice(0, 13)),
            *[ds.isel(time=slice(1, 13)) for ds in meteo_datasets[1:]],
            meteo_datasets[-1].isel(time=slice(13, None)),
        ],
        dim="time",
    )
    merged = pm.meteo(
        meteo_source=meteo_paths,
        meteo_engine="cfgrib",
        meteo_combine_by="nested",
        meteo_merge="first",
        meteo_xr_kwargs={"concat_dim": "step"},
    ).Dataset
    assert merged.equals(expected)
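The two slicing recipes are easier to see on synthetic data. A self-contained sketch (toy hourly datasets, not the cfgrib fixtures above) of how two forecast cycles issued 12 hours apart are stitched together under each strategy:

import numpy as np
import pandas as pd
import xarray as xr

def make_cycle(start):
    # a toy 24 h forecast cycle with hourly steps
    times = pd.date_range(start, periods=24, freq="H")
    return xr.Dataset({"msl": ("time", np.arange(24.0))}, coords={"time": times})

cycles = [make_cycle("2018-10-01 00:00"), make_cycle("2018-10-01 12:00")]

# "last": the first 12 hours of every cycle, then the tail of the last one
last = xr.concat(
    [*[ds.isel(time=slice(0, 12)) for ds in cycles],
     cycles[-1].isel(time=slice(12, None))],
    dim="time",
)

# "first": the first 13 hours of cycle 0, hours 1-12 of later cycles
# (their hour 0 duplicates the previous cycle's hour 12), then the last tail
first = xr.concat(
    [cycles[0].isel(time=slice(0, 13)),
     *[ds.isel(time=slice(1, 13)) for ds in cycles[1:]],
     cycles[-1].isel(time=slice(13, None))],
    dim="time",
)

assert last.time.size == first.time.size == 36  # both cover the full 36 h span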
Example #10
def schism(tmpdir, name):
    filename = (DATA_DIR / name).as_posix()
    # read meteo file
    df = pmeteo.meteo(meteo_source=filename, meteo_engine='cfgrib')
    df.Dataset = df.Dataset.sortby('latitude', ascending=True)

    rpath = str(tmpdir) + '/'
    # output to sflux files
    df.to_output(solver='schism', rpath=rpath)

    # read again meteo
    path = rpath + 'sflux/'
    dr = xr.open_dataset(path + 'sflux_air_1.0001.nc')

    # cleanup (disabled)
    # try:
    #     shutil.rmtree(path)
    # except OSError as e:
    #     print("Error: %s - %s." % (e.filename, e.strerror))

    # compare
    assert np.array_equal(df.Dataset.msl.values, dr.prmsl.values)
    assert np.array_equal(df.Dataset.u10.values, dr.uwind.values)
    assert np.array_equal(df.Dataset.v10.values, dr.vwind.values)
Example #11
def meteo_datasets(meteo_paths) -> List[xr.Dataset]:
    return [
        pm.meteo(meteo_source=path.as_posix(), meteo_engine="cfgrib").Dataset
        for path in meteo_paths
    ]
Example #12
def schism(tmpdir):
    # initialize a model
    rpath = str(tmpdir) + "/schism/"
    case.update({"rpath": rpath + "20181001.00/"})  # use tmpdir for running the model

    b = pyposeidon.model(**case)

    b.execute()

    # creating a time sequence of the runs
    start_date = pd.to_datetime("2018-10-1 0:0:0")
    end_date = pd.to_datetime("2018-10-2 0:0:0")
    date_list = pd.date_range(start_date, end_date, freq="12H")

    # creating a sequence of folders to store the runs. In this case we name them after the date attribute.
    # NOTE that the first folder is the first run already performed!!
    rpaths = [
        rpath + datetime.datetime.strftime(x, "%Y%m%d.%H") + "/"
        for x in date_list
    ]

    # creating a sequence of folders from which we read the meteo.
    meteo = []
    for date in date_list:
        prev_date = pd.to_datetime(date) - pd.to_timedelta("12H")
        prev_date = prev_date.strftime(format="%Y-%m-%d %H:%M:%S")
        dr = pd.date_range(prev_date, date, freq="12H")
        names = [
            "uvp_" + datetime.datetime.strftime(x, "%Y%m%d%H") + ".grib"
            for x in dr
        ]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        meteo.append(dur)

    # set cast
    for i in range(len(rpaths) - 1):
        h = cast.cast(
            solver="schism",
            model=b,
            ppath=rpaths[i],
            cpath=rpaths[i + 1],
            meteo=meteo[i + 1],
            date=date_list[i + 1],
        )
        h.set(execute=True)  # execute

    # Run check case - Total duration
    check.update({"rpath":
                  rpath + "check/"})  # use tmpdir for running the model

    # Combine meteo appropriately

    m1 = pm.meteo(meteo_source=METEO_FILES_2[0], meteo_engine="cfgrib")
    m2 = pm.meteo(meteo_source=METEO_FILES_2[1], meteo_engine="cfgrib")
    m3 = pm.meteo(meteo_source=METEO_FILES_2[2], meteo_engine="cfgrib")
    m4 = pm.meteo(meteo_source=METEO_FILES_2[3], meteo_engine="cfgrib")

    # extract correct chunk

    w1 = m1.Dataset.isel(time=slice(0, 13))
    w2 = m2.Dataset.isel(time=slice(1, 13))  # hour 0 duplicates hour 12 of the previous file, so skip it
    w3 = m3.Dataset.isel(time=slice(1, 13))
    w4 = m4.Dataset.isel(time=slice(1, 13))

    # combine
    meteo = xr.combine_by_coords([w1, w2, w3, w4], combine_attrs="override")
    # saving
    check.update({"meteo_source": meteo})

    c = pyposeidon.model(**check)

    c.execute()

    # COMPARE
    output = data.data(folders=rpaths, solver="schism")

    total = data.data(folders=[rpath + "check/"], solver="schism")

    r = output.Dataset.isel(time=slice(0, 36))

    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(r[var]):
            rb.append(var)

    print(rb)

    # TODO: compare within a tolerance instead of strict equality
    # flag = True
    # for var in rb:
    #     flag = False
    #     mdif = np.abs(total.Dataset[var].values - output.Dataset[var].values).max()
    #     if mdif < 1.e-14:
    #         flag = True
    # print(mdif)

    if (rb == ["zcor"]) or rb == []:
        return True
    else:
        return False
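For reference, the folder bookkeeping in the cast loop above is plain date formatting. A minimal sketch with the same dates (the "run/" prefix is illustrative):

import pandas as pd

date_list = pd.date_range("2018-10-01", "2018-10-02", freq="12H")
rpaths = ["run/" + d.strftime("%Y%m%d.%H") + "/" for d in date_list]
print(rpaths)  # ['run/20181001.00/', 'run/20181001.12/', 'run/20181002.00/']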