# Imports assumed by the snippets in this file (typical pyposeidon test-suite
# layout); `case`, `check`, `DATA_DIR`, `DEM_FILE`, `NCORES` and
# `METEO_FILES_2` are module-level settings defined elsewhere.
import datetime

import numpy as np
import pandas as pd
import xarray as xr

import pyposeidon
import pyposeidon.meteo as pm
from pyposeidon.utils import cast, data


def d3d(tmpdir):
    # initialize a model
    rpath = str(tmpdir) + "/d3d/"
    case.update({"rpath": rpath + "20181001.00/"})  # use tmpdir for running the model

    b = pyposeidon.model(**case)
    b.execute()

    # create the time sequence of the runs
    start_date = pd.to_datetime("2018-10-1 0:0:0")
    end_date = pd.to_datetime("2018-10-2 0:0:0")
    date_list = pd.date_range(start_date, end_date, freq="12H")

    # create a sequence of folders to store the runs, named after the date attribute.
    # NOTE that the first folder is the first run already performed!!
    rpaths = [rpath + datetime.datetime.strftime(x, "%Y%m%d.%H") + "/" for x in date_list]

    # set meteo files
    meteo = []
    for date in date_list:
        end_date = pd.to_datetime(date) + pd.to_timedelta("12H")
        end_date = end_date.strftime(format="%Y-%m-%d %H:%M:%S")
        dr = pd.date_range(date, end_date, freq="12H")
        names = ["uvp_" + datetime.datetime.strftime(x, "%Y%m%d%H") + ".grib" for x in dr]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        meteo.append(dur)

    # set cast
    for l in range(len(rpaths) - 1):
        h = cast.cast(
            solver="d3d",
            model=b,
            ppath=rpaths[l],
            cpath=rpaths[l + 1],
            meteo=meteo[l + 1],
            date=date_list[l + 1],
        )
        h.set(execute=True)  # execute

    # run the check case - total duration
    check.update({"rpath": rpath + "check/"})  # use tmpdir for running the model
    c = pyposeidon.model(**check)
    c.execute()

    # COMPARE
    output = data.data(folders=rpaths, solver="d3d")
    total = data.data(folders=[rpath + "check/"], solver="d3d")

    test = True
    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(output.Dataset[var]):
            rb.append(var)
            if np.abs(total.Dataset[var].values - output.Dataset[var].values).max() > 1.0e-6:
                test = False
    print(rb)
    return test

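# A minimal pytest hook for the cast test above; a sketch assuming pytest's
# built-in tmpdir fixture and the module-level `case`/`check` dicts:
def test_d3d_cast(tmpdir):
    assert d3d(tmpdir) is True
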
def d3d(tmpdir, dic):
    # initialize a model
    rpath = str(tmpdir) + '/'
    dic.update({'rpath': rpath})  # use tmpdir for running the model

    b = pyposeidon.model(**dic)
    try:
        b.execute()
        out = data.data(**dic)  # load the results of the first run
        a = pyposeidon.read_model(rpath + 'd3d_model.json')  # read model
        a.execute()
        out = data.data(**dic)  # load the results of the re-run
        return True
    except Exception:  # a bare except would also swallow KeyboardInterrupt
        return False

def schism(tmpdir, dic):
    # initialize a model
    rpath = str(tmpdir) + '/'
    dic.update({'rpath': rpath})  # use tmpdir for running the model

    b = pyposeidon.model(**dic)
    try:
        b.execute()
        b.results()
        a = pyposeidon.read_model(rpath + 'test_model.json')  # read model
        a.execute()
        a.results()
        return True
    except Exception:
        return False

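# Minimal sketch of how the two helpers above are wired into pytest;
# `case_dic` is a hypothetical stand-in for a full model-settings dict
# (see the schism dict below for the expected shape):
def test_schism_read_model(tmpdir):
    assert schism(tmpdir, case_dic) is True
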
def schism(tmpdir, case):
    # initialize a model
    dic = {
        "solver": "schism",
        "geometry": case,
        "manning": 0.12,
        "windrot": 0.00001,
        "tag": "test",
        "start_date": "2017-10-1 0:0:0",
        "time_frame": "12H",
        "meteo_source": [DATA_DIR / "erai.grib"],  # meteo file
        "meteo_engine": "cfgrib",
        "dem_source": DEM_FILE,
        "ncores": NCORES,  # number of cores
        "update": ["all"],  # update all components (meteo, dem, ...)
        "parameters": {
            "dt": 400,
            "rnday": 0.3,
            "nhot": 0,
            "ihot": 0,
            "nspool": 9,
            "ihfskip": 36,
            "nhot_write": 108,
        },
    }

    rpath = str(tmpdir) + "/"
    dic.update({"rpath": rpath})  # use tmpdir for running the model

    b = pyposeidon.model(**dic)
    try:
        b.execute()
        b.results()
        return True
    except Exception:
        return False

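# Illustrative invocation: `case` is the geometry argument; a lat/lon window
# dict is one common pyposeidon form (the exact keys here are assumptions):
window = {"lon_min": -30.0, "lon_max": -10.0, "lat_min": 60.0, "lat_max": 70.0}


def test_schism_window(tmpdir):
    assert schism(tmpdir, window) is True
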
def schism(tmpdir):
    # initialize a model
    rpath = str(tmpdir) + '/schism/'
    case.update({'rpath': rpath + '20181001.00/'})  # use tmpdir for running the model

    b = pyposeidon.model(**case)
    b.execute()

    # create the time sequence of the runs
    start_date = pd.to_datetime('2018-10-1 0:0:0')
    end_date = pd.to_datetime('2018-10-2 0:0:0')
    date_list = pd.date_range(start_date, end_date, freq='12H')

    # create a sequence of folders to store the runs, named after the date attribute.
    # NOTE that the first folder is the first run already performed!!
    rpaths = [rpath + datetime.datetime.strftime(x, '%Y%m%d.%H') + '/' for x in date_list]

    # build the list of meteo files for each run.
    meteo = []
    for date in date_list:
        end_date = pd.to_datetime(date) + pd.to_timedelta('12H')
        end_date = end_date.strftime(format='%Y-%m-%d %H:%M:%S')
        dr = pd.date_range(date, end_date, freq='12H')
        names = ['uvp_' + datetime.datetime.strftime(x, '%Y%m%d%H') + '.grib' for x in dr]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        meteo.append(dur)

    # set cast
    for l in range(len(rpaths) - 1):
        h = cast.cast(
            solver='schism',
            model=b,
            ppath=rpaths[l],
            cpath=rpaths[l + 1],
            meteo=meteo[l + 1],
            date=date_list[l + 1],
        )
        h.set(execute=True)  # execute

    # run the check case - total duration
    check.update({'rpath': rpath + 'check/'})  # use tmpdir for running the model
    c = pyposeidon.model(**check)
    c.execute()

    # COMPARE
    output = data.data(folders=rpaths, solver='schism')
    total = data.data(folders=[rpath + 'check/'], solver='schism')

    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(output.Dataset[var]):
            rb.append(var)
    print(rb)

    # TODO: compare with a tolerance instead of strict equality, e.g.
    # flag = True
    # for var in rb:
    #     flag = False
    #     mdif = np.abs(total.Dataset[var].values - output.Dataset[var].values).max()
    #     if mdif < 1.0e-14:
    #         flag = True
    #     print(mdif)

    return (rb == ['zcor']) or (rb == [])

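# Quick standalone illustration of the folder-naming scheme used above (a
# self-contained sketch; three 12-hourly stamps span the one-day window):
import datetime

import pandas as pd

dates = pd.date_range('2018-10-1 0:0:0', '2018-10-2 0:0:0', freq='12H')
print([datetime.datetime.strftime(x, '%Y%m%d.%H') + '/' for x in dates])
# -> ['20181001.00/', '20181001.12/', '20181002.00/']
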
def __init__(self, **kwargs):
    for attr, value in kwargs.items():  # iteritems() is Python 2 only
        setattr(self, attr, value)

    logging.basicConfig(filename=self.path + self.case + ".log", level=logging.INFO)

    parent = kwargs.get("parent", ".")

    # load the parent model
    with open(parent + "info.pkl", "rb") as f:  # pickle files must be opened in binary mode
        info = pickle.load(f)

    info["minlon"] = kwargs.get("minlon", None)
    info["maxlon"] = kwargs.get("maxlon", None)
    info["minlat"] = kwargs.get("minlat", None)
    info["maxlat"] = kwargs.get("maxlat", None)
    info["rpath"] = kwargs.get("rpath", info["rpath"] + "./nested/")
    rpath = info["rpath"]  # save for later
    info["resolution"] = kwargs.get("resolution", None)
    info["atm"] = False

    # create the new (nested) case
    nest = model(**info)
    nest.set()  # set up the nested run
    nest.output()  # output to run folder

    # reuse the parent's u, v, p forcing files if present; otherwise create them
    check = [os.path.exists(parent + f) for f in ["u.amu", "v.amv", "p.amp"]]
    if not np.any(check):
        nest.force()
        nest.uvp()  # write u, v, p files
    else:  # link u, v, p
        for filename in ["u.amu", "v.amv", "p.amp"]:
            os.symlink(parent + filename, rpath + filename)

    # modify the mdf file
    inp, order = mdf.read(rpath + nest.impl.tag + ".mdf")

    # adjust variables: create the ini file
    if "Filic" not in order:
        order.append("Filic")
    inp["Filic"] = nest.impl.tag + ".ini"

    # interpolate the parent state onto the nested grid for the initial condition
    pdata = data([parent])
    s1 = pdata.get_data("S1", step=1)
    u1 = pdata.get_data("U1", step=1)
    v1 = pdata.get_data("V1", step=1)
    xz = pdata.get_data("XZ")
    yz = pdata.get_data("YZ")

    orig = pyresample.geometry.SwathDefinition(lons=xz, lats=yz)  # original points
    targ = pyresample.geometry.SwathDefinition(lons=nest.impl.grid.x, lats=nest.impl.grid.y)  # target grid

    s_ini = pyresample.kd_tree.resample_nearest(orig, s1, targ, radius_of_influence=100000, fill_value=0)
    u_ini = pyresample.kd_tree.resample_nearest(orig, u1, targ, radius_of_influence=100000, fill_value=0)
    v_ini = pyresample.kd_tree.resample_nearest(orig, v1, targ, radius_of_influence=100000, fill_value=0)

    # write the .ini file
    with open(rpath + nest.impl.tag + ".ini", "w") as f:
        np.savetxt(f, s_ini)
        np.savetxt(f, u_ini)
        np.savetxt(f, v_ini)

    # create the bc file
    if "Filbnd" not in order:
        order.append("Filbnd")
    if "Filana" not in order:
        order.append("Filana")
    inp["Filbnd"] = nest.impl.tag + ".bnd"
    inp["Filana"] = nest.impl.tag + ".bca"
    # bca =

    inp["Restid"] = "##"  # no restart file

    # update mdf
    mdf.write(inp, rpath + nest.impl.tag + ".mdf", selection=order)

    # run case
    sys.stdout.write("executing\n")
    sys.stdout.flush()
    os.chdir(rpath)
    # subprocess.call(rpath + 'run_flow2d3d.sh', shell=True)
    nest.run()
    nest.save()

    # `date` is expected among kwargs (set as an attribute above)
    logging.info("nesting run done for date :" + datetime.datetime.strftime(self.date, "%Y%m%d.%H"))

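# Hypothetical instantiation of the nesting class above (the enclosing class
# name is not shown in this snippet; `Nest` and the `date` kwarg are assumed):
n = Nest(
    path="./runs/",
    case="nested",
    parent="./runs/parent/",
    minlon=-10.0,
    maxlon=0.0,
    minlat=40.0,
    maxlat=50.0,
    resolution=0.05,
    date=pd.to_datetime("2018-10-1"),
)
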
def schism(tmpdir):
    # initialize a model
    rpath = str(tmpdir) + "/schism/"
    case.update({"rpath": rpath + "20181001.00/"})  # use tmpdir for running the model

    b = pyposeidon.model(**case)
    b.execute()

    # create the time sequence of the runs
    start_date = pd.to_datetime("2018-10-1 0:0:0")
    end_date = pd.to_datetime("2018-10-2 0:0:0")
    date_list = pd.date_range(start_date, end_date, freq="12H")

    # create a sequence of folders to store the runs, named after the date attribute.
    # NOTE that the first folder is the first run already performed!!
    rpaths = [rpath + datetime.datetime.strftime(x, "%Y%m%d.%H") + "/" for x in date_list]

    # build the list of meteo files for each run.
    meteo = []
    for date in date_list:
        prev_date = pd.to_datetime(date) - pd.to_timedelta("12H")
        prev_date = prev_date.strftime(format="%Y-%m-%d %H:%M:%S")
        dr = pd.date_range(prev_date, date, freq="12H")
        names = ["uvp_" + datetime.datetime.strftime(x, "%Y%m%d%H") + ".grib" for x in dr]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        meteo.append(dur)

    # set cast
    for l in range(len(rpaths) - 1):
        h = cast.cast(
            solver="schism",
            model=b,
            ppath=rpaths[l],
            cpath=rpaths[l + 1],
            meteo=meteo[l + 1],
            date=date_list[l + 1],
        )
        h.set(execute=True)  # execute

    # run the check case - total duration
    check.update({"rpath": rpath + "check/"})  # use tmpdir for running the model

    # combine the meteo appropriately
    m1 = pm.meteo(meteo_source=METEO_FILES_2[0], meteo_engine="cfgrib")
    m2 = pm.meteo(meteo_source=METEO_FILES_2[1], meteo_engine="cfgrib")
    m3 = pm.meteo(meteo_source=METEO_FILES_2[2], meteo_engine="cfgrib")
    m4 = pm.meteo(meteo_source=METEO_FILES_2[3], meteo_engine="cfgrib")

    # extract the correct chunk from each file
    w1 = m1.Dataset.isel(time=slice(0, 13))
    w2 = m2.Dataset.isel(time=slice(1, 13))  # note that we keep the 12 hour from the previous file
    w3 = m3.Dataset.isel(time=slice(1, 13))
    w4 = m4.Dataset.isel(time=slice(1, 13))

    # combine and save
    meteo = xr.combine_by_coords([w1, w2, w3, w4], combine_attrs="override")
    check.update({"meteo_source": meteo})

    c = pyposeidon.model(**check)
    c.execute()

    # COMPARE
    output = data.data(folders=rpaths, solver="schism")
    total = data.data(folders=[rpath + "check/"], solver="schism")

    r = output.Dataset.isel(time=slice(0, 36))

    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(r[var]):
            rb.append(var)
    print(rb)

    # TODO: compare with a tolerance instead of strict equality, e.g.
    # flag = True
    # for var in rb:
    #     flag = False
    #     mdif = np.abs(total.Dataset[var].values - r[var].values).max()
    #     if mdif < 1.0e-14:
    #         flag = True
    #     print(mdif)

    return (rb == ["zcor"]) or (rb == [])

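# A minimal standalone version of the tolerance check sketched in the TODO
# comments above (an assumed helper, not part of pyposeidon):
import numpy as np


def within_tolerance(ds_a, ds_b, var, tol=1.0e-14):
    # maximum absolute difference of `var` between two xarray Datasets
    return np.abs(ds_a[var].values - ds_b[var].values).max() < tol


# e.g.: all(within_tolerance(total.Dataset, output.Dataset, v) for v in rb)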