Example #1
    # Assumed context: this snippet needs glob, numpy as np, pandas as pd,
    # xarray as xr, a module-level logger, plus the project's grid reader
    # (r2d) and observations helper (obs).
    def __init__(self, **kwargs):

        rpath = kwargs.get('rpath', './d3d/')

        # optional explicit list of run folders; defaults to rpath
        folders = kwargs.get('folders', None)
        
        if folders:
            self.folders = folders
        else:
            self.folders = [rpath]
        
        # check whether more than one configuration (tag) is present
        ifiles = glob.glob(self.folders[0] + '/*_model.json')

        if len(ifiles) > 1:
            logger.warning('more than one configuration, specify tag argument \n')
        tag = kwargs.get('tag', None)

        if tag:
            ifile = self.folders[0] + '/' + tag + '_model.json'
        else:
            ifile = ifiles[0]  # note: raises IndexError if no *_model.json is present
        
        logger.info('reading data based on {} \n'.format(ifile))

        # parse the model settings into a plain dict
        with open(ifile, 'rb') as f:
            info = pd.read_json(f, lines=True).T
            info[info.isnull().values] = None
            self.info = info.to_dict()[0]
        # read the Delft3D grid and bathymetry files
        grid = r2d.read_file(self.folders[0] + '/' + self.info['tag'] + '.grd')

        deb = np.loadtxt(self.folders[0] + '/' + self.info['tag'] + '.dep')
        
        # create the wet/dry mask from the no-data value (-999.)
        d = deb[1:-1, 1:-1]
        self.w = d == -999.

        b = deb[:-1, :-1]
        b[b == -999.] = np.nan  # mask missing depths
        
        # store the (sign-flipped) depths as a bathymetry Dataset
        self.dem = xr.Dataset({'bathymetry': (['latitude', 'longitude'], -b)},
                              coords={'longitude': ('longitude', grid.lons[0, :]),
                                      'latitude': ('latitude', grid.lats[:, 0])})
        
        self.grid = grid
            
            
        # READ DATA

        nfiles = [folder + '/trim-' + self.info['tag'] + '.nc' for folder in self.folders]

        ds = xr.open_mfdataset(nfiles, combine='by_coords', data_vars='minimal')

        self.Dataset = ds
            
        # drop duplicate time steps
        self.Dataset = self.Dataset.sel(time=~self.Dataset.indexes['time'].duplicated())
                             
        dic = self.info.copy()  # start from the model settings
        dic.update(kwargs)      # let explicit kwargs override them

        if 'sa_date' not in dic:
            dic.update({'sa_date': self.Dataset.time.values[0]})

        if 'se_date' not in dic:
            dic.update({'se_date': self.Dataset.time.values[-1]})
        self.obs = obs(**dic)
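
A minimal usage sketch for the reader above, assuming the __init__ belongs to a class named here D3dResults (the enclosing class is not shown in the snippet):

    # Hypothetical usage; 'D3dResults' and the run layout are assumptions.
    r = D3dResults(rpath='./d3d/', tag='test')  # reads ./d3d/test_model.json

    r.Dataset         # merged trim-<tag>.nc output as an xarray.Dataset
    r.dem.bathymetry  # bathymetry on the model grid
    r.obs             # observations helper built from the model settings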
Example #2
    # Assumed context: this snippet needs glob, pandas as pd, xarray as xr,
    # a module-level logger, the project's model factory (pm) and the
    # observations helper (obs).
    def __init__(self, **kwargs):

        rpath = kwargs.get('rpath', './schism/')

        # optional explicit list of run folders; defaults to rpath
        folders = kwargs.get('folders', None)

        if folders:
            self.folders = folders
        else:
            self.folders = [rpath]
        datai = []

        tag = kwargs.get('tag', 'schism')

        misc = kwargs.get('misc', {})
        for folder in self.folders:

            logger.info('Combining output for folder {}\n'.format(folder))

            # combined output files, e.g. outputs/schout_1.nc, schout_2.nc, ...
            xdat = glob.glob(folder + '/outputs/schout_[!0]*.nc')
            xdat.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))

            if len(xdat) > 0:
                datai.append(xdat)  # keep the sorted list for this folder
            else:  # no combined output yet, so run the merge step first

                with open(folder + '/' + tag + '_model.json', 'r') as f:
                    info = pd.read_json(f, lines=True).T
                    info[info.isnull().values] = None
                    info = info.to_dict()[0]

                p = pm.model(**info)

                p.misc = misc

                p.results()  # expected to produce the combined schout_*.nc files

                self.misc = p.misc

                xdat = glob.glob(folder + '/outputs/schout_[!0]*.nc')
                xdat.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))

                datai.append(xdat)
                                       
        # flatten the per-folder lists: combine='by_coords' expects a flat
        # sequence of paths (nested lists require combine='nested')
        files = [f for xdat in datai for f in xdat]
        self.Dataset = xr.open_mfdataset(files, combine='by_coords', data_vars='minimal')

        
        # rebuild the model object from the last folder's settings
        with open(self.folders[-1] + '/' + tag + '_model.json', 'r') as f:
            info = pd.read_json(f, lines=True).T
            info[info.isnull().values] = None
            info = info.to_dict()[0]

        p = pm.model(**info)
        
        logger.info('Retrieve station timeseries if any\n')

        dstamp = kwargs.get('dstamp', info['date'])
        try:
            p.get_station_data(dstamp=dstamp)
            self.time_series = p.time_series
        except Exception:
            logger.info('no station data loaded')
        
        dic = {}

        try:
            with open(self.folders[0] + '/' + tag + '_model.json', 'r') as f:
                info = pd.read_json(f, lines=True).T
                info[info.isnull().values] = None
                self.info = info.to_dict()[0]

            dic = self.info.copy()  # start from the model settings
            dic.update(kwargs)      # let explicit kwargs override them

        except Exception:
            pass
    
        if 'sa_date' not in dic:
            dic.update({'sa_date': self.Dataset.time.values[0]})

        if 'se_date' not in dic:
            dic.update({'se_date': self.Dataset.time.values[-1]})

        # fall back to the mesh extent for the bounding box
        if 'lon_min' not in dic:
            dic.update({'lon_min': self.Dataset.SCHISM_hgrid_node_x.values.min()})

        if 'lon_max' not in dic:
            dic.update({'lon_max': self.Dataset.SCHISM_hgrid_node_x.values.max()})

        if 'lat_min' not in dic:
            dic.update({'lat_min': self.Dataset.SCHISM_hgrid_node_y.values.min()})

        if 'lat_max' not in dic:
            dic.update({'lat_max': self.Dataset.SCHISM_hgrid_node_y.values.max()})
        
        logger.info('Retrieve observations info\n')
        
        self.obs = obs(**dic)
        
        online = kwargs.get('online', False)

        if online:
            logger.info('collect observational data')
            tgs = self.obs.locations.loc[self.obs.locations.Group == 'TD UNESCO']
        
            dic = {}
            for i in tgs.index:
                # retry until the fetch returns data
                # (note: this loops forever if a station never responds)
                while True:
                    p = self.obs.iloc(i)
                    if p is not None:
                        if p.shape[0] > 1:
                            p = p.dropna()
                        break
                dic.update({tgs.loc[i].Name.strip(): p})

            tg = pd.concat(dic, axis=0, sort=True)

            try:
                tg = tg.drop('TimeUTC', axis=1)
            except Exception:
                pass

            tg.to_csv(self.folders[0] + '/obs.csv')

            self.obs.data = tg
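
Similarly, a minimal usage sketch for the SCHISM reader, assuming the __init__ above belongs to a class named here SchismResults (again, the enclosing class is not shown):

    # Hypothetical usage; 'SchismResults' is an assumed name.
    r = SchismResults(folders=['./schism/run1', './schism/run2'],
                      tag='schism',
                      online=True)  # also fetch tide-gauge data

    r.Dataset    # merged schout_*.nc output as an xarray.Dataset
    r.obs.data   # tide-gauge observations, also written to run1/obs.csv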