Example #1
def d3d(tmpdir, dic):
    #initialize a model
    rpath = str(tmpdir) + '/'
    dic.update({'rpath': rpath})  # use tmpdir for running the model
    b = pyPoseidon.model(**dic)

    try:
        b.execute()
        a = pyPoseidon.read_model(rpath + 'd3d_model.json')  # read model
        a.execute()
        return True
    except Exception:  # avoid a bare except: it would also swallow SystemExit/KeyboardInterrupt
        return False
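
A minimal sketch of how a helper like `d3d` above is typically driven via pytest's built-in `tmpdir` fixture; the `cases` list is illustrative only (a real case needs geometry, meteo and dem entries):

import pytest

# illustrative, incomplete case dictionaries; real runs need geometry/meteo/dem keys
cases = [
    {'solver': 'd3d', 'tag': 'test', 'start_date': '2017-10-1 0:0:0', 'time_frame': '12H'},
]

@pytest.mark.parametrize('dic', cases)
def test_d3d(tmpdir, dic):
    assert d3d(tmpdir, dic)  # the helper returns True on a successful run/read/rerun cycle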
def schism(tmpdir, case):
    #initialize a model
    dic = {
        'solver': 'schism',
        'geometry': case,
        'manning': .12,
        'windrot': 0.00001,
        'tag': 'test',
        'start_date': '2017-10-1 0:0:0',
        'time_frame': '12H',
        'meteo_source': [DATA_DIR / 'erai.grib'],  #meteo file
        'meteo_engine': 'cfgrib',
        'dem_source': DEM_FILE,
        'ncores': NCORES,  #number of cores
        'update': ['all'],  #update all components (meteo, dem, model files)
        'parameters': {
            'dt': 400,
            'rnday': 0.3,
            'nhot': 0,
            'ihot': 0,
            'nspool': 9,
            'ihfskip': 36,
            'nhot_write': 108
        }
    }

    rpath = str(tmpdir) + '/'
    dic.update({'rpath': rpath})  # use tmpdir for running the model

    b = pyPoseidon.model(**dic)

    try:
        b.execute()
        b.results()
        return True
    except Exception:  # avoid a bare except: it would also swallow SystemExit/KeyboardInterrupt
        return False
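
The `parameters` block above encodes simple time-stepping arithmetic: `rnday` is in days, `dt` in seconds, and `nspool`/`ihfskip` count steps (the standard SCHISM interpretation, stated here as an assumption). A quick consistency check:

dt = 400          # time step [s]
rnday = 0.3       # run length [days]
nspool = 9        # write output every nspool steps
ihfskip = 36      # steps per output stack

nsteps = rnday * 86400 / dt
print(nsteps)             # 64.8 steps in total
print(nsteps / nspool)    # ~7 output records
print(nsteps / ihfskip)   # ~2 output stacks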
Example #3
    def __init__(self, **kwargs):

        for attr, value in kwargs.items():  # kwargs.iteritems() is Python 2 only
            setattr(self, attr, value)

        # 'path' and 'case' are expected among the kwargs
        logging.basicConfig(filename=self.path + self.case + '.log', level=logging.INFO)

        parent = kwargs.get('parent', './')  # parent folder must end with '/' for the paths below

        # load the parent model info (pickle files must be opened in binary mode)
        with open(parent + 'info.pkl', 'rb') as f:
            info = pickle.load(f)

        info['minlon'] = kwargs.get('minlon', None)
        info['maxlon'] = kwargs.get('maxlon', None)
        info['minlat'] = kwargs.get('minlat', None)
        info['maxlat'] = kwargs.get('maxlat', None)

        info['rpath'] = kwargs.get('rpath', info['rpath'] + './nested/')
        rpath = info['rpath']  # save for later

        info['resolution'] = kwargs.get('resolution', None)

        info['atm'] = False

        # create new case
        nest = model(**info)

        nest.set()  # setup nested run

        nest.output()  # output to run folder

        check = [os.path.exists(parent + f) for f in ['u.amu', 'v.amv', 'p.amp']]
        if not np.any(check):  # none of the parent forcing files exist; generate them
            nest.force()
            nest.uvp()  # write u, v, p files
        else:  # link u, v, p from the parent run
            for filename in ['u.amu', 'v.amv', 'p.amp']:
                os.symlink(parent + filename, rpath + filename)

        # modify mdf file
        inp, order = mdf.read(rpath + nest.impl.tag + '.mdf')

        # adjust variables

        # create the ini file
        if 'Filic' not in order: order.append('Filic')
        inp['Filic'] = nest.impl.tag + '.ini'

        pdata = data([parent])

        s1 = pdata.get_data('S1', step=1)
        u1 = pdata.get_data('U1', step=1)
        v1 = pdata.get_data('V1', step=1)

        xz = pdata.get_data('XZ')
        yz = pdata.get_data('YZ')

        orig = pyresample.geometry.SwathDefinition(lons=xz, lats=yz)  # original points
        targ = pyresample.geometry.SwathDefinition(lons=nest.impl.grid.x, lats=nest.impl.grid.y)  # target grid

        # resample the parent fields onto the nested grid (nearest neighbour)
        s_ini = pyresample.kd_tree.resample_nearest(orig, s1, targ, radius_of_influence=100000, fill_value=0)
        u_ini = pyresample.kd_tree.resample_nearest(orig, u1, targ, radius_of_influence=100000, fill_value=0)
        v_ini = pyresample.kd_tree.resample_nearest(orig, v1, targ, radius_of_influence=100000, fill_value=0)

        # write the .ini file
        with open(rpath + nest.impl.tag + '.ini', 'w') as f:
            np.savetxt(f, s_ini)
            np.savetxt(f, u_ini)
            np.savetxt(f, v_ini)

        # create the bc file
        if 'Filbnd' not in order: order.append('Filbnd')
        if 'Filana' not in order: order.append('Filana')

        inp['Filbnd'] = nest.impl.tag + '.bnd'
        inp['Filana'] = nest.impl.tag + '.bca'

#        bca = 

        inp['Restid'] = '##'  # no restart file

        # update mdf
        mdf.write(inp, rpath + nest.impl.tag + '.mdf', selection=order)

        # run case
        sys.stdout.write('executing\n')
        sys.stdout.flush()

        os.chdir(rpath)
        #subprocess.call(rpath+'run_flow2d3d.sh',shell=True)
        nest.run()

        nest.save()

        # 'date' is expected among the kwargs (set via setattr above)
        logging.info('nesting run done for date: ' + datetime.datetime.strftime(self.date, '%Y%m%d.%H'))


#    def get_ini():


#    def get_bca():
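A hypothetical instantiation of this nesting class; the class name `Nest` and all values are placeholders, only the keyword names come from the constructor above:

import datetime

n = Nest(                                 # 'Nest' stands for the class defined above
    path='/runs/', case='med_nest',       # used to name the log file
    parent='/runs/parent/',               # parent run folder, must end with '/'
    minlon=20., maxlon=30.,
    minlat=30., maxlat=40.,               # nested-domain window
    resolution=0.02,                      # nested grid resolution
    date=datetime.datetime(2018, 10, 1),  # used in the final log message
)
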
Example #4
def d3d(tmpdir,dic):
    #initialize a model
    rpath = str(tmpdir)
    dic.update({'rpath':rpath + '/20181001.00/'}) # use tmpdir for running the model
    b = pyPoseidon.model(**dic)

    b.execute()
    # Cast
    #read the info from the first run
    with open(rpath+'/20181001.00/d3d_model.json', 'rb') as f:
        info = pd.read_json(f,lines=True).T
        info[info.isnull().values] = None
        info = info.to_dict()[0]

    info.update({'path':rpath}) # The path of the project

    #creating a time sequence of the runs
    start_date = pd.to_datetime('2018-10-1 0:0:0')
    end_date = pd.to_datetime('2018-10-1 12:0:0')
    date_list = pd.date_range(start_date,end_date, freq='12H')
    #append to dic
    info.update({'start_date':start_date,'end_date':end_date, 'dates' : date_list})

    #creating a sequence of folders to store the runs. In this case we name them after the date attribute.
    #NOTE that the first folder is the first run already performed!!
    folders = [datetime.datetime.strftime(x, '%Y%m%d.%H') for x in date_list]
    info.update({'folders':folders})

    #set meteo files
    meteo = []
    for date in date_list:
        end_date= pd.to_datetime(date) + pd.to_timedelta(info['time_frame'])
        end_date = end_date.strftime(format='%Y-%m-%d %H:%M:%S')
        dr = pd.date_range(date, end_date, freq='12H')
        dur = [(DATA_DIR / ('uvp_' + datetime.datetime.strftime(x, '%Y%m%d%H') + '.grib')).as_posix() for x in dr]
        meteo.append(dur)
    info.update({'meteo_source':meteo})
    print(meteo)

    info.update({'time_frame' : len(folders)*[info['time_frame']]})


    h = cast.cast(**info) # initialize
    h.run()
    # combine output
    folders = [info['path']+'/'+f for f in info['folders']]
    res = data.data(folders=folders,solver='d3d')


    # check single run; case2 (like case and check below) is a module-level configuration dict
    case2.update({'rpath':rpath + '/combined/'})
    a = pyPoseidon.model(**case2)
    a.execute()
    out = data.data(**case2)

    test = True
    for var in out.Dataset.data_vars:
        if not out.Dataset[var].equals(res.Dataset[var]):
            if np.abs(out.Dataset[var].values-res.Dataset[var].values).max() > 1.e-6 : test = False


    return test
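
The json round trip at the top of this example (re-reading the model description into a plain dict) recurs in the schism examples below. A self-contained sketch of the same pattern on synthetic data; the file name is illustrative:

import pandas as pd

# write a one-record, line-delimited json file, then read it back as the examples do
pd.DataFrame([{'solver': 'd3d', 'tag': 'test', 'ncores': None}]).to_json(
    'model.json', orient='records', lines=True)

with open('model.json', 'rb') as f:
    info = pd.read_json(f, lines=True).T  # transpose: one column per record
    info[info.isnull().values] = None     # normalize json null/NaN back to None
    info = info.to_dict()[0]              # column 0 -> plain dict

print(info)  # {'solver': 'd3d', 'tag': 'test', 'ncores': None}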
def d3d(tmpdir):
    #initialize a model
    rpath = str(tmpdir) + '/d3d/'
    case.update({'rpath':
                 rpath + '/20181001.00/'})  # use tmpdir for running the model
    b = pyPoseidon.model(**case)

    b.execute()

    #creating a time sequence of the runs
    start_date = pd.to_datetime('2018-10-1 0:0:0')
    end_date = pd.to_datetime('2018-10-2 0:0:0')
    date_list = pd.date_range(start_date, end_date, freq='12H')

    #creating a sequence of folders to store the runs. In this case we name them after the date attribute.
    #NOTE that the first folder is the first run already performed!!
    rpaths = [
        rpath + datetime.datetime.strftime(x, '%Y%m%d.%H') + '/'
        for x in date_list
    ]

    #set meteo files
    meteo = []
    for date in date_list:
        end_date = pd.to_datetime(date) + pd.to_timedelta('12H')
        end_date = end_date.strftime(format='%Y-%m-%d %H:%M:%S')
        dr = pd.date_range(date, end_date, freq='12H')
        names = [
            'uvp_' + datetime.datetime.strftime(x, '%Y%m%d%H') + '.grib'
            for x in dr
        ]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        meteo.append(dur)

    #set cast
    for i in range(len(rpaths) - 1):
        h = cast.cast(solver='d3d',
                      model=b,
                      ppath=rpaths[i],
                      cpath=rpaths[i + 1],
                      meteo=meteo[i + 1],
                      date=date_list[i + 1])
        h.set(execute=True)  # execute

    # Run check case - Total duration
    check.update({'rpath':
                  rpath + 'check/'})  # use tmpdir for running the model
    c = pyPoseidon.model(**check)
    c.execute()

    # COMPARE
    output = data.data(folders=rpaths, solver='d3d')
    total = data.data(folders=[rpath + 'check/'], solver='d3d')

    test = True
    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(output.Dataset[var]):
            rb.append(var)
            if np.abs(total.Dataset[var].values -
                      output.Dataset[var].values).max() > 1.e-6:
                test = False

    print(rb)
    return test
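
Both d3d examples above end with the same element-wise comparison loop; a small helper capturing that pattern (the name `datasets_match` is illustrative, not part of pyPoseidon):

import numpy as np

def datasets_match(a, b, tol=1.e-6):
    """Return True if every data variable of xarray Dataset a equals the
    corresponding variable of b, exactly or within absolute tolerance tol."""
    for var in a.data_vars:
        if a[var].equals(b[var]):
            continue
        if np.abs(a[var].values - b[var].values).max() > tol:
            return False
    return True

# e.g. against the objects built above:  test = datasets_match(total.Dataset, output.Dataset)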
Example #6
def schism(tmpdir):
    #initialize a model
    rpath = str(tmpdir)+'/schism/'
    case.update({'rpath':rpath+'20181001.00/'}) # use tmpdir for running the model

    b = pyPoseidon.model(**case)

    b.execute()

    #creating a time sequence of the runs
    start_date = pd.to_datetime('2018-10-1 0:0:0')
    end_date = pd.to_datetime('2018-10-2 0:0:0')
    date_list = pd.date_range(start_date,end_date, freq='12H')

    #creating a sequence of folders to store the runs. In this case we name them after the date attribute.
    #NOTE that the first folder is the first run already performed!!
    rpaths = [rpath + datetime.datetime.strftime(x, '%Y%m%d.%H') +'/' for x in date_list]

    #creating a sequence of folder from which we read the meteo.
    meteo = []
    for date in date_list:
        prev_date= pd.to_datetime(date) - pd.to_timedelta('12H')
        prev_date = prev_date.strftime(format='%Y-%m-%d %H:%M:%S')
        dr = pd.date_range(prev_date, date, freq='12H')
        names = ['uvp_'+ datetime.datetime.strftime(x, '%Y%m%d%H') + '.grib' for x in dr]
        dur = [ (DATA_DIR / name).as_posix() for name in names ] 
        meteo.append(dur)

    #set cast
    for i in range(len(rpaths)-1):
        h = cast.cast(solver='schism',model=b,ppath=rpaths[i],cpath=rpaths[i+1],meteo=meteo[i+1], date=date_list[i+1])
        h.set(execute=True) # execute

    # Run check case - Total duration
    check.update({'rpath':rpath+'check/'}) # use tmpdir for running the model

    # Combine meteo appropriately

    m1 = pm.meteo(meteo_source=METEO_FILES_2[0],meteo_engine='cfgrib')
    m2 = pm.meteo(meteo_source=METEO_FILES_2[1],meteo_engine='cfgrib')
    m3 = pm.meteo(meteo_source=METEO_FILES_2[2],meteo_engine='cfgrib')
    m4 = pm.meteo(meteo_source=METEO_FILES_2[3],meteo_engine='cfgrib')

    # extract correct chunk

    w1 = m1.Dataset.isel(time=slice(0,13))
    w2 = m2.Dataset.isel(time=slice(1,13)) # drop the first record; hour 12 is already covered by the previous file
    w3 = m3.Dataset.isel(time=slice(1,13))
    w4 = m4.Dataset.isel(time=slice(1,13))

    #combine
    meteo = xr.combine_by_coords([w1,w2,w3,w4],combine_attrs='override')
    #saving
    check.update({'meteo_source' : meteo})
    
    c = pyPoseidon.model(**check)

    c.execute()

    # COMPARE
    output = data.data(folders=rpaths,solver='schism')

    total = data.data(folders=[rpath+'check/'],solver='schism')

    r = output.Dataset.isel(time=slice(0,36))
    

    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(r[var]):
            rb.append(var)

    print(rb)


#    flag = True TODO
#    for var in rb:
#        flag = False
#        mdif = np.abs(total.results.Dataset[var].values - output.results.Dataset[var].values).max()
#        if mdif < 1.e-14 :
#            flag = True
#    print(mdif)

    if (rb == ['zcor']) or rb==[]:
        return True
    else:
        return False
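
The chunk extraction above stitches 12-hourly forecast files that overlap by one record; a self-contained sketch of the same idea on synthetic xarray data:

import numpy as np
import pandas as pd
import xarray as xr

def make_forecast(start):
    # a toy 13-record hourly 'forecast' covering hours 0..12 from start
    time = pd.date_range(start, periods=13, freq='H')
    return xr.Dataset({'msl': ('time', np.random.rand(13))}, coords={'time': time})

m1 = make_forecast('2018-10-01 00:00')
m2 = make_forecast('2018-10-01 12:00')  # its first record duplicates m1's last

w1 = m1.isel(time=slice(0, 13))         # keep all 13 records
w2 = m2.isel(time=slice(1, 13))         # drop the overlapping first record

combined = xr.combine_by_coords([w1, w2])
assert combined.time.size == 25         # 13 + 12, no duplicate timestamps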
Example #7
def schism(tmpdir):
    #initialize a model
    rpath = str(tmpdir) + '/schism/'
    case.update({'rpath':
                 rpath + '20181001.00/'})  # use tmpdir for running the model

    b = pyPoseidon.model(**case)

    b.execute()

    # run the cast
    with open(rpath + '20181001.00/schism_model.json', 'rb') as f:
        info = pd.read_json(f, lines=True).T
        info[info.isnull().values] = None
        info = info.to_dict()[0]

    info.update({'path': rpath})

    #creating a time sequence of the runs
    start_date = pd.to_datetime('2018-10-1 0:0:0')
    end_date = pd.to_datetime('2018-10-2 0:0:0')
    date_list = pd.date_range(start_date, end_date, freq='12H')

    #append to dic
    info.update({
        'start_date': start_date,
        'end_date': end_date,
        'dates': date_list
    })

    #creating a sequence of folders to store the runs. In this case we name them after the date attribute.
    #NOTE that the first folder is the first run already performed!!
    folders = [datetime.datetime.strftime(x, '%Y%m%d.%H') for x in date_list]
    info.update({'folders': folders})

    #creating a sequence of folder from which we read the meteo.
    meteo = []
    for date in date_list:
        end_date = pd.to_datetime(date) + pd.to_timedelta(info['time_frame'])
        end_date = end_date.strftime(format='%Y-%m-%d %H:%M:%S')
        dr = pd.date_range(date, end_date, freq='12H')
        names = [
            'uvp_' + datetime.datetime.strftime(x, '%Y%m%d%H') + '.grib'
            for x in dr
        ]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        meteo.append(dur)

    info.update({'meteo_source': meteo})

    info['time_frame'] = len(folders) * [info['time_frame']]

    #set cast
    h = cast.cast(**info)  # initialize

    h.run()

    # Run check case - Total duration
    check.update({'rpath':
                  rpath + 'check/'})  # use tmpdir for running the model

    c = pyPoseidon.model(**check)

    c.execute()

    # COMPARE
    folders = [info['path'] + f for f in info['folders']]
    output = data.data(folders=folders, solver='schism')

    total = data.data(folders=[rpath + 'check/'], solver='schism')

    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(output.Dataset[var]):
            rb.append(var)

    print(rb)

    #    flag = True TODO
    #    for var in rb:
    #        flag = False
    #        mdif = np.abs(total.results.Dataset[var].values - output.results.Dataset[var].values).max()
    #        if mdif < 1.e-14 :
    #            flag = True
    #    print(mdif)

    if (rb == ['zcor']) or rb == []:
        return True
    else:
        return False
def schism(tmpdir):
    #initialize a model
    rpath = str(tmpdir) + '/schism/'
    case.update({'rpath':
                 rpath + '20181001.00/'})  # use tmpdir for running the model

    #creating a time sequence of the runs
    start_date = pd.to_datetime('2018-10-1 0:0:0')
    end_date = pd.to_datetime('2018-10-2 0:0:0')
    date_list = pd.date_range(start_date, end_date, freq='12H')

    m0 = pm.meteo(meteo_source=METEO_FILES_1, engine='cfgrib')

    case.update({'meteo_source': m0.Dataset})

    b = pyPoseidon.model(**case)

    b.execute()

    # run the cast
    with open(rpath + '20181001.00/schism_model.json', 'rb') as f:
        info = pd.read_json(f, lines=True).T
        info[info.isnull().values] = None
        info = info.to_dict()[0]

    info.update({'path': rpath})

    #append to dic
    info.update({
        'start_date': start_date,
        'end_date': end_date,
        'dates': date_list
    })

    #creating a sequence of folders to store the runs. In this case we name them after the date attribute.
    #NOTE that the first folder is the first run already performed!!
    folders = [datetime.datetime.strftime(x, '%Y%m%d.%H') for x in date_list]
    info.update({'folders': folders})

    #creating a sequence of folder from which we read the meteo.
    meteo = [m0.Dataset]
    for date in date_list[1:]:
        end_date = pd.to_datetime(date) + pd.to_timedelta(info['time_frame'])
        end_date = end_date.strftime(format='%Y-%m-%d %H:%M:%S')
        dr = [date - pd.to_timedelta('12H'), date]
        names = [
            'uvp_' + datetime.datetime.strftime(x, '%Y%m%d%H') + '.grib'
            for x in dr
        ]
        dur = [(DATA_DIR / name).as_posix() for name in names]
        m1 = pm.meteo(meteo_source=dur[0], engine='cfgrib')
        m2 = pm.meteo(meteo_source=dur[1], engine='cfgrib')
        w1 = m1.Dataset.isel(time=slice(12, 13))   # keep only the hour-12 record of the previous file
        w2 = m2.Dataset.isel(time=slice(1, None))  # drop the first record, already covered by w1
        mf = xr.combine_by_coords([w1, w2])
        meteo.append(mf)

    info.update({'meteo_source': meteo})

    info['time_frame'] = len(folders) * [info['time_frame']]

    #set cast
    h = cast.cast(**info)  # initialize

    h.run()

    # Run check case - Total duration
    check.update({'rpath':
                  rpath + 'check/'})  # use tmpdir for running the model

    # Combine meteo appropriately

    m1 = pm.meteo(meteo_source=METEO_FILES_2[0], engine='cfgrib')
    m2 = pm.meteo(meteo_source=METEO_FILES_2[1], engine='cfgrib')
    m3 = pm.meteo(meteo_source=METEO_FILES_2[2], engine='cfgrib')
    m4 = pm.meteo(meteo_source=METEO_FILES_2[3], engine='cfgrib')

    # extract correct chunk

    w1 = m1.Dataset.isel(time=slice(0, 13))
    w2 = m2.Dataset.isel(time=slice(1, 13))  # drop the first record; hour 12 is already covered by the previous file
    w3 = m3.Dataset.isel(time=slice(1, 13))
    w4 = m4.Dataset.isel(time=slice(1, 13))

    #combine
    meteo = xr.combine_by_coords([w1, w2, w3, w4])
    #saving
    check.update({'meteo_source': meteo})

    c = pyPoseidon.model(**check)

    c.execute()

    # COMPARE
    folders = [info['path'] + f for f in info['folders']]
    output = data.data(folders=folders, solver='schism')

    total = data.data(folders=[rpath + 'check/'], solver='schism')

    r = output.Dataset.isel(time=slice(0, 36))

    rb = []
    for var in total.Dataset.data_vars:
        if not total.Dataset[var].equals(r[var]):
            rb.append(var)

    print(rb)

    #    flag = True TODO
    #    for var in rb:
    #        flag = False
    #        mdif = np.abs(total.results.Dataset[var].values - output.results.Dataset[var].values).max()
    #        if mdif < 1.e-14 :
    #            flag = True
    #    print(mdif)

    if (rb == ['zcor']) or rb == []:
        return True
    else:
        return False
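
Across these examples the same hot-start cascade recurs: each leg restarts from the previous run folder (`ppath`), writes the next one (`cpath`), and gets its own meteo slice. A schematic of that loop, assuming the same `cast` module imported by the examples above:

def run_cascade(solver, model, rpaths, meteo, date_list):
    # schematic cascade mirroring the loops above; rpaths, meteo and
    # date_list are built exactly as in the examples
    for i in range(len(rpaths) - 1):
        h = cast.cast(solver=solver,        # 'schism' or 'd3d'
                      model=model,          # the initial model object
                      ppath=rpaths[i],      # previous (hot-start source) folder
                      cpath=rpaths[i + 1],  # current (target) folder
                      meteo=meteo[i + 1],   # forcing for this leg
                      date=date_list[i + 1])
        h.set(execute=True)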