Example #1
def test_answer():
    filenames = sorted(DATA_DIR.glob("uvp_*"))

    # read the meteo files in one go, combining the forecasts
    df = pm.meteo(meteo_source=filenames,
                  engine='cfgrib',
                  combine_by='nested',
                  combine_forecast=True,
                  xr_kwargs={'concat_dim': 'step'})  # use combine
    df0 = pm.meteo(meteo_source=[filenames[0]],
                   engine='cfgrib')  # each one separately
    df1 = pm.meteo(meteo_source=[filenames[1]],
                   engine='cfgrib')  # each one separately
    df2 = pm.meteo(meteo_source=[filenames[2]],
                   engine='cfgrib')  # each one separately
    df3 = pm.meteo(meteo_source=[filenames[3]],
                   engine='cfgrib')  # each one separately

    # merge the single files: first 12 hours of each forecast, last one kept whole
    joined = xr.concat(
        [
            df0.Dataset.isel(time=slice(0, 12)),
            df1.Dataset.isel(time=slice(0, 12)),
            df2.Dataset.isel(time=slice(0, 12)),
            df3.Dataset,
        ],
        dim='time',
    )

    assert joined.equals(df.Dataset)  # compare
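A minimal sketch of the stitching being verified above, using synthetic xarray datasets in place of the GRIB files (the 12-step slicing and the concat are the same; the variable name and sizes here are illustrative):

import numpy as np
import pandas as pd
import xarray as xr

def make_forecast(start, steps=37):
    # hourly forecast beginning at `start`
    times = pd.date_range(start, periods=steps, freq='H')
    return xr.Dataset({'msl': ('time', np.random.rand(steps))},
                      coords={'time': times})

runs = [make_forecast(t) for t in pd.date_range('2018-10-01', periods=4, freq='12H')]

# keep the first 12 hours of every run except the last, which is kept whole
joined = xr.concat([ds.isel(time=slice(0, 12)) for ds in runs[:-1]] + [runs[-1]],
                   dim='time')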
Example #2
def test_meteo(tmpdir, name):
    rpath = str(tmpdir) + '/'
    filename = (DATA_DIR / name).as_posix()
    d = pmeteo.meteo(filename, engine='netcdf')
    d.to_output(solver='schism', rpath=rpath, split_by='day')
    d.to_output(solver='schism', rpath=rpath, filename='all.nc')

    # read the SCHISM sflux files back in
    files = glob.glob(rpath + 'sflux/*.nc')
    files.sort()
    ma = []
    for ifile in files:
        g = xr.open_dataset(ifile)
        # rebuild absolute timestamps from base_date + fractional days
        ts = '-'.join(g.time.attrs['base_date'].astype(str)[:3])
        time_r = pd.to_datetime(ts)
        times = time_r + pd.to_timedelta(g.time.values, unit='D').round('H')
        g = g.assign_coords({'time': times})
        ma.append(g)

    b = xr.merge(ma)
    b.close()

    # convert back to SCHISM's time coordinates (days since the first record)
    tlist = pd.to_datetime(b.time.data) - pd.to_datetime(b.time.data[0])
    tlist = tlist / pd.to_timedelta('1D')

    b = b.assign_coords({'time': tlist})

    al = xr.open_dataset(rpath + 'all.nc')

    assert b.equals(al)
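The test hinges on a round trip between SCHISM's relative time axis (fractional days since base_date) and absolute timestamps. The conversion in isolation, using only pandas (the sample values are illustrative):

import pandas as pd

base_date = pd.to_datetime('2018-10-1')   # taken from the file's time attrs
rel_days = [0.0, 0.125, 0.25]             # SCHISM time values, in days

# relative days -> absolute timestamps, rounded to the hour
times = base_date + pd.to_timedelta(rel_days, unit='D').round('H')

# absolute timestamps -> days since the first record (SCHISM's convention)
back = (times - times[0]) / pd.to_timedelta('1D')
assert list(back) == rel_days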
Example #3
def d3d(tmpdir, name):
    filename = (DATA_DIR / name).as_posix()
    # read the meteo file
    df = pmeteo.meteo(meteo_source=filename, engine='cfgrib')

    rpath = str(tmpdir) + '/'
    # output to uvp files
    df.to_output(solver='d3d', rpath=rpath)

    # read the meteo back in
    m = pmodel(solver='d3d')

    p = m.from_force(rpath + 'p.amp', 'msl')
    u = m.from_force(rpath + 'u.amu', 'u10')
    v = m.from_force(rpath + 'v.amv', 'v10')

    dr = xr.merge([p, u, v])
    dr = dr.sortby('latitude', ascending=True)

    # compare
    df.Dataset = df.Dataset.sortby('latitude', ascending=True)

    assert np.abs(df.Dataset.msl.values - dr.msl.values).max() < 1e-3
    assert np.abs(df.Dataset.u10.values - dr.u10.values).max() < 1e-3
    assert np.abs(df.Dataset.v10.values - dr.v10.values).max() < 1e-3
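Because the Delft3D writers quantize the fields, the comparison above uses a maximum-absolute-difference tolerance rather than exact equality. The same pattern as a standalone helper (hypothetical, not part of the pyPoseidon API):

import numpy as np

def assert_close(a, b, tol=1e-3):
    # the largest pointwise deviation must stay below the write/read tolerance
    diff = np.abs(np.asarray(a) - np.asarray(b)).max()
    assert diff < tol, f"max deviation {diff} exceeds {tol}"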
Example #4
def test_meteo(name):
    filename = (DATA_DIR / name).as_posix()
    try:
        pmeteo.meteo(filename, engine='netcdf')
        r = True
    except Exception:  # avoid a bare except
        r = False

    assert r
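The try/except/flag dance is unnecessary under pytest, since any uncaught exception already fails the test. A leaner smoke test under the same assumptions (DATA_DIR and the name parameter as above):

def test_meteo_smoke(name):
    # pytest reports a failure if this raises
    filename = (DATA_DIR / name).as_posix()
    pmeteo.meteo(filename, engine='netcdf')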
Example #5
    def force(self, **kwargs):

        meteo_source = get_value(self, kwargs, 'meteo_source', None)

        kwargs.update({'meteo_source': meteo_source})

        flag = get_value(self, kwargs, 'update', [])
        # decide whether the meteo needs to be (re)computed

        z = {**self.__dict__, **kwargs}  # merge self and possible kwargs

        if flag:
            if ('meteo' in flag) or ('all' in flag):
                self.meteo = pmeteo.meteo(**z)
            else:
                logger.info('skipping meteo files ..\n')
        else:
            self.meteo = pmeteo.meteo(**z)
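get_value is pyPoseidon's lookup helper. A minimal sketch of the kwargs-over-attribute precedence it provides here (the real implementation may differ):

def get_value(obj, kwargs, name, default=None):
    # explicit kwargs win, then instance attributes, then the default
    if name in kwargs:
        return kwargs[name]
    return getattr(obj, name, default)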
Example #6
def test_merge_strategy_last(meteo_paths, meteo_datasets):
    # In strategy "last" we want:
    # - the first 12 hours of all the datasets
    # - the rest of the hours of the last one
    expected = xr.concat(
        [
            *[ds.isel(time=slice(0, 12)) for ds in meteo_datasets],
            meteo_datasets[-1].isel(time=slice(12, None)),
        ],
        dim="time",
    )
    merged = pm.meteo(
        meteo_source=meteo_paths,
        meteo_engine="cfgrib",
        meteo_combine_by="nested",
        meteo_merge="last",
        meteo_xr_kwargs={"concat_dim": "step"},
    ).Dataset
    assert merged.equals(expected)
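With hourly forecasts issued every 12 hours, strategy "last" resolves every overlap in favour of the newer run. On toy lists standing in for the time axes, the slicing reduces to:

runs = [list(range(i, i + 37)) for i in (0, 12, 24, 36)]  # toy hourly records

merged_last = sum([r[:12] for r in runs], []) + runs[-1][12:]
assert len(merged_last) == 12 * len(runs) + (37 - 12)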
Example #7
def test_merge_strategy_first(meteo_paths, meteo_datasets):
    # In strategy "first" we want:
    # - the first 13 hours of the first meteo
    # - hours 1-12 of each subsequent meteo (hour 0 duplicates the previous run)
    # - hours 13-end of the last meteo
    expected = xr.concat(
        [
            meteo_datasets[0].isel(time=slice(0, 13)),
            *[ds.isel(time=slice(1, 13)) for ds in meteo_datasets[1:]],
            meteo_datasets[-1].isel(time=slice(13, None)),
        ],
        dim="time",
    )
    merged = pm.meteo(
        meteo_source=meteo_paths,
        meteo_engine="cfgrib",
        meteo_combine_by="nested",
        meteo_merge="first",
        meteo_xr_kwargs={"concat_dim": "step"},
    ).Dataset
    assert merged.equals(expected)
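Strategy "first", by contrast, keeps the older run wherever two runs overlap: the first forecast contributes one extra record (hour 12) and the later ones start at hour 1. Same toy setup as above:

runs = [list(range(i, i + 37)) for i in (0, 12, 24, 36)]  # toy hourly records

merged_first = runs[0][:13] + sum([r[1:13] for r in runs[1:]], []) + runs[-1][13:]
assert len(merged_first) == 13 + 12 * (len(runs) - 1) + (37 - 13)

Both strategies cover the same time axis; they differ only in which run supplies the overlapping hours.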
Example #8
def schism(tmpdir, name):
    filename = (DATA_DIR / name).as_posix()
    # read the meteo file
    df = pmeteo.meteo(meteo_source=filename, engine='cfgrib')
    df.Dataset = df.Dataset.sortby('latitude', ascending=True)

    rpath = str(tmpdir) + '/'
    # output to SCHISM sflux files
    df.to_output(solver='schism', rpath=rpath)

    # read the meteo back in
    path = rpath + 'sflux/'
    dr = xr.open_dataset(path + 'sflux_air_1.0001.nc')

    # cleanup (currently disabled)
    #    try:
    #        shutil.rmtree(path)
    #    except OSError as e:
    #        print ("Error: %s - %s." % (e.filename, e.strerror))

    # compare
    assert np.array_equal(df.Dataset.msl.values, dr.prmsl.values)
    assert np.array_equal(df.Dataset.u10.values, dr.uwind.values)
    assert np.array_equal(df.Dataset.v10.values, dr.vwind.values)
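The three assertions encode the variable mapping pyPoseidon applies when writing SCHISM sflux files: msl -> prmsl, u10 -> uwind, v10 -> vwind. A sketch of the same comparison via an explicit rename (hypothetical helper; it compares raw values only and assumes matching grids):

import numpy as np
import xarray as xr

SFLUX_NAMES = {'msl': 'prmsl', 'u10': 'uwind', 'v10': 'vwind'}

def matches_sflux(src: xr.Dataset, sflux: xr.Dataset) -> bool:
    renamed = src[list(SFLUX_NAMES)].rename(SFLUX_NAMES)
    return all(np.array_equal(renamed[v].values, sflux[v].values)
               for v in SFLUX_NAMES.values())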
Example #9
def schism(tmpdir):
    # initialize a model
    rpath = str(tmpdir) + '/schism/'
    case.update({'rpath': rpath + '20181001.00/'})  # use tmpdir for running the model

    b = pyPoseidon.model(**case)

    b.execute()

    # create a time sequence for the runs
    start_date = pd.to_datetime('2018-10-1 0:0:0')
    end_date = pd.to_datetime('2018-10-2 0:0:0')
    date_list = pd.date_range(start_date, end_date, freq='12H')

    # create a sequence of folders to store the runs, named after the date attribute
    # NOTE that the first folder is the first run, already performed!
    rpaths = [rpath + datetime.datetime.strftime(x, '%Y%m%d.%H') + '/' for x in date_list]

    # build, for each run, the list of meteo files to read
    meteo = []
    for date in date_list:
        prev_date = pd.to_datetime(date) - pd.to_timedelta('12H')
        prev_date = prev_date.strftime(format='%Y-%m-%d %H:%M:%S')
        dr = pd.date_range(prev_date, date, freq='12H')
        names = ['uvp_' + datetime.datetime.strftime(x, '%Y%m%d%H') + '.grib' for x in dr]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        meteo.append(dur)

    # set up and execute the casts
    for i in range(len(rpaths) - 1):
        h = cast.cast(solver='schism', model=b, ppath=rpaths[i],
                      cpath=rpaths[i + 1], meteo=meteo[i + 1], date=date_list[i + 1])
        h.set(execute=True)  # execute

    # Run check case - Total duration
    check.update({'rpath': rpath + 'check/'})  # use tmpdir for running the model

    # Combine meteo appropriately

    m1 = pm.meteo(meteo_source=METEO_FILES_2[0], meteo_engine='cfgrib')
    m2 = pm.meteo(meteo_source=METEO_FILES_2[1], meteo_engine='cfgrib')
    m3 = pm.meteo(meteo_source=METEO_FILES_2[2], meteo_engine='cfgrib')
    m4 = pm.meteo(meteo_source=METEO_FILES_2[3], meteo_engine='cfgrib')

    # extract the correct chunks

    w1 = m1.Dataset.isel(time=slice(0, 13))
    w2 = m2.Dataset.isel(time=slice(1, 13))  # skip hour 0, which duplicates hour 12 of the previous file
    w3 = m3.Dataset.isel(time=slice(1, 13))
    w4 = m4.Dataset.isel(time=slice(1, 13))

    # combine into a single continuous dataset
    meteo = xr.combine_by_coords([w1, w2, w3, w4], combine_attrs='override')
    # pass the combined meteo to the check case
    check.update({'meteo_source': meteo})

    c = pyPoseidon.model(**check)

    c.execute()

    # COMPARE
    output = data.data(folders=rpaths, solver='schism')

    total = data.data(folders=[rpath + 'check/'], solver='schism')

    r = output.Dataset.isel(time=slice(0, 36))


    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(r[var]):
            rb.append(var)

    print(rb)


    #    flag = True TODO
    #    for var in rb:
    #        flag = False
    #        mdif = np.abs(total.results.Dataset[var].values - output.results.Dataset[var].values).max()
    #        if mdif < 1.e-14 :
    #            flag = True
    #    print(mdif)

    return (rb == ['zcor']) or (rb == [])
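The file-name arithmetic that feeds each 12-hour restart is worth seeing in isolation; for every cast date it picks the GRIB file of the previous cycle plus the current one (pure pandas, same naming scheme as above):

import pandas as pd

dates = pd.date_range('2018-10-01', '2018-10-02', freq='12H')

for date in dates:
    window = pd.date_range(date - pd.to_timedelta('12H'), date, freq='12H')
    names = ['uvp_' + d.strftime('%Y%m%d%H') + '.grib' for d in window]
    # e.g. 2018-10-01 12:00 -> ['uvp_2018100100.grib', 'uvp_2018100112.grib']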
Example #10
def meteo_datasets(meteo_paths) -> List[xr.Dataset]:
    # note: the fixture returns the underlying xarray Datasets, not pm.meteo objects
    return [
        pm.meteo(meteo_source=path.as_posix(), meteo_engine="cfgrib").Dataset
        for path in meteo_paths
    ]
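This fixture depends on a meteo_paths fixture. A plausible companion, mirroring the uvp_* naming used in the other examples (hypothetical; the actual fixture may differ):

import pytest

@pytest.fixture
def meteo_paths():
    # hypothetical: the uvp_* GRIB files shipped with the test data
    return sorted(DATA_DIR.glob("uvp_*"))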
Example #11
def schism(tmpdir):
    # initialize a model
    rpath = str(tmpdir) + '/schism/'
    case.update({'rpath': rpath + '20181001.00/'})  # use tmpdir for running the model

    # create a time sequence for the runs
    start_date = pd.to_datetime('2018-10-1 0:0:0')
    end_date = pd.to_datetime('2018-10-2 0:0:0')
    date_list = pd.date_range(start_date, end_date, freq='12H')

    m0 = pm.meteo(meteo_source=METEO_FILES_1, engine='cfgrib')

    case.update({'meteo_source': m0.Dataset})

    b = pyPoseidon.model(**case)

    b.execute()

    # run the cast
    with open(rpath + '20181001.00/schism_model.json', 'rb') as f:
        info = pd.read_json(f, lines=True).T
        info[info.isnull().values] = None
        info = info.to_dict()[0]

    info.update({'path': rpath})

    # append to the info dict
    info.update({
        'start_date': start_date,
        'end_date': end_date,
        'dates': date_list
    })

    # create a sequence of folders to store the runs, named after the date attribute
    # NOTE that the first folder is the first run, already performed!
    folders = [datetime.datetime.strftime(x, '%Y%m%d.%H') for x in date_list]
    info.update({'folders': folders})

    # build, for each run, the meteo dataset to read
    meteo = [m0.Dataset]
    for date in date_list[1:]:
        end_date = pd.to_datetime(date) + pd.to_timedelta(info['time_frame'])
        end_date = end_date.strftime(format='%Y-%m-%d %H:%M:%S')
        dr = [date - pd.to_timedelta('12H'), date]
        names = [
            'uvp_' + datetime.datetime.strftime(x, '%Y%m%d%H') + '.grib'
            for x in dr
        ]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        m1 = pm.meteo(meteo_source=dur[0], engine='cfgrib')
        m2 = pm.meteo(meteo_source=dur[1], engine='cfgrib')
        w1 = m1.Dataset.isel(time=slice(12, 13))  # keep hour 12 of the previous file
        w2 = m2.Dataset.isel(time=slice(1, None))  # skip hour 0, which duplicates it
        mf = xr.combine_by_coords([w1, w2])
        meteo.append(mf)

    info.update({'meteo_source': meteo})

    info['time_frame'] = len(folders) * [info['time_frame']]

    # set up the cast
    h = cast.cast(**info)  # initialize

    h.run()

    # Run check case - Total duration
    check.update({'rpath': rpath + 'check/'})  # use tmpdir for running the model

    # Combine meteo appropriately

    m1 = pm.meteo(meteo_source=METEO_FILES_2[0], engine='cfgrib')
    m2 = pm.meteo(meteo_source=METEO_FILES_2[1], engine='cfgrib')
    m3 = pm.meteo(meteo_source=METEO_FILES_2[2], engine='cfgrib')
    m4 = pm.meteo(meteo_source=METEO_FILES_2[3], engine='cfgrib')

    # extract the correct chunks

    w1 = m1.Dataset.isel(time=slice(0, 13))
    w2 = m2.Dataset.isel(time=slice(1, 13))  # skip hour 0, which duplicates hour 12 of the previous file
    w3 = m3.Dataset.isel(time=slice(1, 13))
    w4 = m4.Dataset.isel(time=slice(1, 13))

    # combine into a single continuous dataset
    meteo = xr.combine_by_coords([w1, w2, w3, w4])
    # pass the combined meteo to the check case
    check.update({'meteo_source': meteo})

    c = pyPoseidon.model(**check)

    c.execute()

    # COMPARE
    folders = [info['path'] + f for f in info['folders']]
    output = data.data(folders=folders, solver='schism')

    total = data.data(folders=[rpath + 'check/'], solver='schism')

    r = output.Dataset.isel(time=slice(0, 36))

    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(r[var]):
            rb.append(var)

    print(rb)

    #    flag = True TODO
    #    for var in rb:
    #        flag = False
    #        mdif = np.abs(total.results.Dataset[var].values - output.results.Dataset[var].values).max()
    #        if mdif < 1.e-14 :
    #            flag = True
    #    print(mdif)

    return (rb == ['zcor']) or (rb == [])
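The overlap stitching used twice above relies on xr.combine_by_coords aligning the chunks on their time coordinate. A minimal synthetic demonstration (names and sizes illustrative):

import numpy as np
import pandas as pd
import xarray as xr

def chunk(start, hours):
    times = pd.date_range(start, periods=hours, freq='H')
    return xr.Dataset({'msl': ('time', np.arange(hours, dtype=float))},
                      coords={'time': times})

w1 = chunk('2018-10-01 00:00', 13)   # hours 0-12
w2 = chunk('2018-10-01 13:00', 12)   # hours 13-24, no remaining overlap
stitched = xr.combine_by_coords([w1, w2])
assert stitched.time.size == 25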