Example #1
    def apply_single_point_fractional_cover(self, base_file_path, fractional_cover, lat, lon, key='frac'):
        """
        Change the fractional land cover at a single point in the file
        :param base_file_path: Path of the base land cover file to edit
        :param fractional_cover: List of fractional cover values to set the point to
        :param lat: Latitude of point to edit
        :param lon: Longitude of point to edit
        :param key: The name of the fractional cover variable in the land cover file
        :raise ServiceException:
        """
        base = Dataset(base_file_path, 'r+')
        base_frac = base.variables[key]
        base_frac_array = base_frac[:, :, :]

        # Get the indices of the latitude and longitude position
        lat_key = self._nc_helper.look_for_key(base.variables.keys(), constants.NETCDF_LATITUDE)
        lon_key = self._nc_helper.look_for_key(base.variables.keys(), constants.NETCDF_LONGITUDE)
        if lat_key is None or lon_key is None:
            log.exception("Could not apply land cover edit: could not identify latitude and longitude variables")
            raise ServiceException("Could not apply land cover edit: could not identify "
                                   "latitude and longitude variables")
        lat_index, lon_index = self._nc_helper.get_lat_lon_index(base.variables, lat_key, lon_key, lat, lon)

        n_pseudo = base_frac.shape[0]
        if n_pseudo != len(fractional_cover):
            log.exception("Could not apply land cover edit: the number of fractional values supplied did not match "
                          "the number of types in the netCDF file.")
            raise ServiceException("Could not apply land cover edit: the number of fractional values supplied did not "
                                   "match the number of types in the netCDF file.")
        for pseudo in np.arange(n_pseudo):
            base_frac_array[pseudo, lat_index, lon_index] = fractional_cover[pseudo]
        base_frac[:, :, :] = base_frac_array
        base.close()
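
Below is a minimal standalone sketch of the same single-point edit pattern using netCDF4 directly, for readers without the surrounding service class; the file name, variable name, indices and cover values are hypothetical placeholders, not values from this example.

# Hedged sketch only: 'land_cover.nc', 'frac', the indices and the new cover
# values are all hypothetical placeholders.
import numpy as np
from netCDF4 import Dataset

with Dataset('land_cover.nc', 'r+') as ds:
    frac = ds.variables['frac']            # assumed shape: (pseudo, lat, lon)
    new_cover = np.array([0.2, 0.3, 0.5])  # one value per pseudo level
    if frac.shape[0] != len(new_cover):
        raise ValueError("number of fractional values does not match the file")
    lat_index, lon_index = 10, 20          # indices located beforehand
    frac[:, lat_index, lon_index] = new_cover
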
Example #2
 def test_singletime(self):
     # issue 215 test (date2index with time variable length == 1)
     f = Dataset(self.file)
     time2 = f.variables['time2']
     result_index = date2index(self.first_timestamp,time2, select="exact")
     assert_equal(result_index, 0)
     f.close()
Example #3
File: ipyview.py  Project: yeyuguo/metamet
def load_everything(fname, basket_dest=''):
    try:
        fname = expand_path(fname)
        ext = get_ext(fname)
        if ext == '.npy':
            return np.load(fname), "npy"
        elif ext in ('.nc', '.ncf'):
            data = Dataset(fname, mode='r')
            if 'lidarname' in data.ncattrs():
                return LidarDataset(fname), "LidarDataset"
            else:
                return data, "netcdf"
        elif ext in ('.h5', '.h5f', '.hdf', '.hdf5'):
            return pd.HDFStore(fname, mode='r'), "pd.HDFStore"
        elif ext == '.csv':
            return pd.DataFrame.from_csv(fname), "pd.DataFrame"
        elif ext == '.zip':
            if basket_dest:
                globald[basket_dest] = dict()
                varnames = loadbasket(fname, dest=globald[basket_dest])
            else:
                varnames = loadbasket(fname)
            return varnames, "basket"
        elif ext in ('.pickle', '.pic'):
            return loadpickle(fname), "pickle"
        elif ext == '.txt':
            return np.loadtxt(fname), "txt"
    except Exception as e:
        warner.write("Error while loading : %s \n" % fname)
        warner.write(str(e))
        warner.write('\n')
Example #4
def read(filename):
    """Reads attributes from a file."""
    try:
        nc = NC(filename)
    except Exception:
        print("ERROR: can't open %s" % filename)
        sys.exit(0)

    names = ['pism_config', 'pism_overrides']
    varname = None
    var = None
    for name in names:
        try:
            var = nc.variables[name]
            varname = name
        except:
            pass

    if var is None:
        print("ERROR: can't find 'pism_config' or 'pism_overrides' in '%s'." % filename)
        sys.exit(0)

    attrs = var.ncattrs()
    attributes = {}
    for each in attrs:
        attributes[each] = getattr(var, each)
    nc.close()

    return (varname, attributes)
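
A short usage sketch for read(); the file name below is a placeholder, not a file referenced by this example.

# Hypothetical usage; 'pism_output.nc' is a placeholder file name.
varname, attributes = read('pism_output.nc')
print("found '%s' with %d attributes" % (varname, len(attributes)))
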
Example #5
def read_netcdf(nc_file, variables=None, coords=None):
    """
    Read data from input netCDF. Will read all variables if none provided.
    Will also return all variable attributes.
    Both variables (data and attributes) are returned as dictionaries named
    by variable
    """

    f = Dataset(nc_file, 'r')

    if not variables:
        variables = f.variables.keys()
    if not coords:
        coords = slice(None)

    log.debug('Reading input data variables: %s, from file: %s', variables,
              nc_file)

    d = {}
    a = {}
    g = {}

    for var in variables:
        d[var] = f.variables[var][coords]
        a[var] = f.variables[var].__dict__

    for attr in f.ncattrs():
        g[attr] = getattr(f, attr)

    f.close()

    return d, a, g
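
A brief usage sketch for read_netcdf(); the file and variable names are placeholders, not inputs used by this example.

# Hypothetical usage; 'forcing.nc' and 'prec' are placeholder names.
data, var_attrs, global_attrs = read_netcdf('forcing.nc', variables=['prec'])
print(data['prec'].shape)              # the variable's values
print(var_attrs['prec'].get('units'))  # its netCDF attributes as a dict
print(global_attrs)                    # file-level attributes
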
Example #6
def main(args):
    vars_ = '|'.join(args.variables)
    test_files = iter_matching(args.basedir, re.compile('.*({}).*(_rcp26|_rcp45|_rcp85|_historical_).*r1i1p1.*nc'.format(vars_)))

    if args.dry_run:
        for f in test_files:
            print(f)
        sys.exit(0)

    FileType = ClimdexFile if args.climdex else Cmip5File

    for fp in test_files:
        log.info(fp)

        nc = Dataset(fp)
        available_climo_periods = determine_climo_periods(nc)
        nc.close()
        file_ = FileType(fp)
        variable = file_.variable

        for _, t_range in available_climo_periods.items():

            # Create climatological period and update metadata
            log.info('Generating climo period %s to %s', d2s(t_range[0]), d2s(t_range[1]))
            out_fp = file_.generate_climo_fp(t_range, args.outdir)
            log.info('Output file: %s', format(out_fp))
            try:
                create_climo_file(fp, out_fp, t_range[0], t_range[1], variable)
            except Exception:
                log.warning('Failed to create climatology file')
            else:
                update_climo_time_meta(out_fp, FileType)
Example #7
def get_level(resource, level):
  from flyingpigeon.ocgis_module import call
  from netCDF4 import Dataset
  from flyingpigeon.utils import get_variable
  from numpy import squeeze

  data = None  # ensure 'data' is defined even if extraction fails
  try:
    level_data = call(resource, level_range=[int(level),int(level)])
    if type(resource) == list:
      resource.sort()
    variable = get_variable(level_data)
    logger.info('found %s in file' % variable)
    ds = Dataset(level_data, mode='a')
    var = ds.variables.pop(variable)
    dims = var.dimensions
    new_var = ds.createVariable('z%s'% level, var.dtype, dimensions=(dims[0],dims[2],dims[3]))
    # i = where(var[:]==level)
    new_var[:,:,:] = squeeze(var[:,0,:,:])
    ds.close()
    logger.info('level %s extracted' % level)

    data = call(level_data , variable = 'z%s'%level)
    
  except Exception as e:
    logger.error('failed to extract level %s ' % e)
  return data
Example #8
    def setUp(self):

        self.files = [tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name for nfile in range(2)]
        for nfile,file in enumerate(self.files):
            f = Dataset(file,'w',format='NETCDF4_CLASSIC')
            f.createDimension('time',None)
            f.createDimension('y',ydim)
            f.createDimension('z',zdim)
            f.history = 'created today'

            time = f.createVariable('time', 'f', ('time', ))
            #time.units = 'days since {0}-01-01'.format(1979+nfile)
            yr = 1979+nfile
            time.units = 'days since %s-01-01' % yr

            time.calendar = 'standard'

            x = f.createVariable('x','f',('time', 'y', 'z'))
            x.units = 'potatoes per square mile'

            nx1 = self.ninc*nfile
            nx2 = self.ninc*(nfile+1)

            time[:] = np.arange(self.ninc)
            x[:] = np.arange(nx1, nx2).reshape(self.ninc,1,1) * np.ones((1, ydim, zdim))

            f.close()
Example #9
    def setUp(self):
        """ Check that a the AIMS system or this script hasn't been modified.
        This function checks that a downloaded file still has the same md5.
        """
        logging_aims()
        channel_id                   = '8365'
        from_date                    = '2008-09-30T00:27:27Z'
        thru_date                    = '2008-09-30T00:30:00Z'
        level_qc                     = 1
        aims_rss_val                 = 100
        xml_url                      = 'http://data.aims.gov.au/gbroosdata/services/rss/netcdf/level%s/%s' % (str(level_qc), str(aims_rss_val))

        aims_xml_info                = parse_aims_xml(xml_url)
        channel_id_info = aims_xml_info[channel_id]
        self.netcdf_tmp_file_path    = download_channel(channel_id, from_date, thru_date, level_qc)
        modify_soop_trv_netcdf(self.netcdf_tmp_file_path, channel_id_info)

        # force values of attributes which change all the time
        netcdf_file_obj              = Dataset(self.netcdf_tmp_file_path, 'a', format='NETCDF4')
        netcdf_file_obj.date_created = "1970-01-01T00:00:00Z"
        netcdf_file_obj.history      = 'data validation test only'
        netcdf_file_obj.close()

        shutil.move(self.netcdf_tmp_file_path, remove_creation_date_from_filename(self.netcdf_tmp_file_path))
        self.netcdf_tmp_file_path    = remove_creation_date_from_filename(self.netcdf_tmp_file_path)
Example #10
def load_era40(filename):
    era40 = Dataset(filename, mode="r")
    longitudes = era40.variables["longitude"][:]
    latitudes = era40.variables["latitude"][:]
    t = era40.variables["p2t"][0][:]
    era40.close()
    return longitudes, latitudes, t
Example #11
 def runTest(self):
     """testing NETCDF3_64BIT_DATA format (CDF-5)"""
     f  = Dataset(self.netcdf_file, 'r')
     assert f.dimensions['dim'].size == dimsize
     assert_array_equal(arrdata, f.variables['var'][:ndim])
     assert (type(f.int64_attr) == np.int64)
     f.close()
Example #12
 def test_0d(self):
     f = Dataset(self.file, 'w')
     v = f.createVariable('data', float)
     v[...] = 10
     assert_array_equal(v[...], 10)
     assert_equal(v.shape, v[...].shape)
     f.close()
Example #13
class NetCDFData(Data):

    def __init__(self, url):
        self._dataset = None
        self.__timestamp_cache = TTLCache(1, 3600)
        super(NetCDFData, self).__init__(url)

    def __enter__(self):
        self._dataset = Dataset(self.url, 'r')

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._dataset.close()

    @property
    def timestamps(self):
        if self.__timestamp_cache.get("timestamps") is None:
            var = None
            for v in ['time', 'time_counter']:
                if v in self._dataset.variables:
                    var = self._dataset.variables[v]
                    break

            t = netcdftime.utime(var.units)
            timestamps = np.array(
                [t.num2date(ts).replace(tzinfo=pytz.UTC) for ts in var[:]]
            )
            timestamps.flags.writeable = False
            self.__timestamp_cache["timestamps"] = timestamps

        return self.__timestamp_cache.get("timestamps")
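
A small usage sketch of the context-manager pattern above; the URL is a placeholder and assumes the dataset exposes a 'time' or 'time_counter' variable.

# Hypothetical usage; the URL is a placeholder, not a real endpoint.
with NetCDFData('http://example.com/thredds/dodsC/model.nc') as nc_data:
    stamps = nc_data.timestamps   # read-only array, cached for one hour
    print(stamps[0], stamps[-1])
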
Example #14
    def setUp(self):

        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name

        self.fillval = default_fillvals["i2"]
        self.v    = np.array([self.fillval, 5, 4, -9999], dtype = "i2")
        self.v_ma = ma.array([self.fillval, 5, 4, -9999], dtype = "i2", mask = [True, False, False, True])

        self.scale_factor = 10.
        self.add_offset = 5.

        self.v_scaled = self.v * self.scale_factor + self.add_offset
        self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset

        f = Dataset(self.testfile, 'w')
        _ = f.createDimension('x', None)
        v = f.createVariable('v', "i2", 'x')

        v.missing_value = np.array(-9999, v.dtype)

        # v[0] not set, will be equal to _FillValue
        v[1] = self.v[1]
        v[2] = self.v[2]
        v[3] = v.missing_value

        f.close()
Example #15
    def test_3d(self):
        """testing variable slicing"""
        f  = Dataset(self.file, 'r')
        v = f.variables['data']
        vu = f.variables['datau']

        # test return of array scalar.
        assert_equal(v[0,0,0].shape,())
        assert_array_equal(v[:], datarev)
        # test reading of slices.
        # negative value means count back from end.
        assert_array_equal(v[:-1,:-2,:-3],datarev[:-1,:-2,:-3])
        # every other element (positive step)
        assert_array_equal(v[2:-1:2,2:-2:2,2:-3:2],datarev[2:-1:2,2:-2:2,2:-3:2])
        # every other element (negative step)
        assert_array_equal(v[-1:2:-2,-2:2:-2,-3:2:-2],datarev[-1:2:-2,-2:2:-2,-3:2:-2])
        # read elements in reverse order
        assert_array_equal(v[:,::-1,:],data)
        assert_array_equal(v[::-1,:,::-1],datarev[::-1,:,::-1])
        assert_array_equal(v[xdim-1::-3,:,zdim-1::-3],datarev[xdim-1::-3,:,zdim-1::-3])

        # ellipsis slice.
        assert_array_equal(v[...,2:],datarev[...,2:])
        # variable with an unlimited dimension.
        assert_array_equal(vu[:], data[:,::-1,:])
        # read data in reverse order
        assert_array_equal(vu[:,::-1,:],data)
        # index using an integer array scalar
        i = NP.ones(1,'i4')[0]
        assert_array_equal(v[i],datarev[1])

        f.close()
Example #16
class hitran:
  """Loads hitran pre-computed tables in memory"""
  def __init__(self,datafile):
    from netCDF4 import Dataset
    self.df = Dataset(datafile)
    self.desc = self.df.info
    self.v1 = self.df.v1
    self.v2 = self.df.v2
  def get(self,name,part=None):
    import numpy as np
    if ( part is None ):
      return np.array(self.df.variables[name][:])
    else:
      return np.array(self.df.variables[name][part])
  def nchan(self):
    return len(self.df.dimensions['nchan'])
  def nlayod(self):
    return len(self.df.dimensions['nlayod'])
  def ntmpod(self):
    return len(self.df.dimensions['ntmpod'])
  def nf(self):
    return len(self.df.dimensions['nf'])
  def nmol(self):
    return len(self.df.dimensions['nmol'])
  def mxmols(self):
    return len(self.df.dimensions['mxmols'])
  def nmoltab(self):
    return len(self.df.dimensions['nmoltab'])
  def __del__(self):
    self.df.close( )
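
A usage sketch for the hitran wrapper; the file name and variable name are assumptions, and the file is expected to carry the info/v1/v2 global attributes and the dimensions queried.

# Hypothetical usage; 'hitran_tables.nc' and 'odtot' are placeholder names.
h = hitran('hitran_tables.nc')
print(h.nchan(), h.nlayod())        # sizes of two of the table dimensions
full = h.get('odtot')               # whole variable as a numpy array
first = h.get('odtot', part=0)      # a single slice along the first axis
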
Example #17
def set_basic_md(resource):
  """
  basis meta data
  :param resource: netCDF file where basic meta data should be set
  """
  import sys
  from datetime import datetime as dt 
  
  py_version = sys.version
  creation_date = dt.strftime( dt.now(), format='%Y-%m-%dT%H:%M:%S')
  
  md_basic = {
     'activity': 'birdhouse project',
     'software':'flyingpigeon v 0.1', 
     'software_project': 'birdhouse',
     'software_reference':'https://github.com/bird-house/',
     'software_platform': 'PYTHON %s' % py_version,
     'contact_mail_1':'*****@*****.**',
     'contact_mail_2':'*****@*****.**',
     'creation_date': creation_date ,
     }
  
  ds = Dataset(resource, mode='a')
  ds.setncatts(md_basic)
  ds.close()
  
  return(resource)
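
A one-line usage sketch; the file name is a placeholder.

# Hypothetical usage; 'tasmax_eur.nc' is a placeholder netCDF file.
set_basic_md('tasmax_eur.nc')   # stamps the file with the basic provenance attributes
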
Example #18
def read_nc(infile, varname, dimension=-1, is_time=0):
	'''Read a variable from a netCDF file

	Input:
		input file path
		variable name
		dimension: if < 0, read in all dimensions of the variable; if >= 0, only read in the [dimension]th of the variable (index starts from 0). For example, if the first dimension of the variable is time, and if dimension=2, then only reads in the 3rd time step.
		is_time: if the desired variable is time (1 for time; 0 for not time). If it is time, return an array of datetime object

	Return:
		var: a numpy array of the variable (an array of datetime objects if is_time=1)
	'''
	from netCDF4 import Dataset
	from netCDF4 import num2date

	nc = Dataset(infile, 'r')
	if is_time==0:  # if not time variable
		if dimension<0:
			var = nc.variables[varname][:]
		else:
			var = nc.variables[varname][dimension]
	if is_time==1:  # if time variable
		time = nc.variables[varname]
		if hasattr(time, 'calendar'):  # if time variable has 'calendar' attribute
			if dimension<0:
				var = num2date(time[:], time.units, time.calendar)
			else:
				var = num2date(time[dimension], time.units, time.calendar)
		else:  # if time variable does not have 'calendar' attribute
			if dimension<0:
				var = num2date(time[:], time.units)
			else:
				var = num2date(time[dimension], time.units)
	nc.close()
	return var
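
A usage sketch for read_nc(); the file and variable names are placeholders.

# Hypothetical usage; 'fluxes.nc', 'prec' and 'time' are placeholder names.
prec = read_nc('fluxes.nc', 'prec')                     # full array
prec_step3 = read_nc('fluxes.nc', 'prec', dimension=2)  # only the 3rd time step
times = read_nc('fluxes.nc', 'time', is_time=1)         # datetime objects
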
Example #19
File: load_region.py  Project: kmunve/APS
def load_region(region_id, local=False, return_regions=False):

    if local:
        _vr = Dataset(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2017.nc"),
            "r")
        # flip up-down because Meps data is upside down
        #_regions = np.flipud(_vr.variables["LokalOmr_2018"][:])
        _regions = _vr.variables["LokalOmr_2018"][:]
    else:
        _vr = Dataset(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2019.nc"),
            "r")
        # flip up-down because Meps data is upside down
        #_regions = np.flipud(_vr.variables["skredomr19_km"][:])
        _regions = _vr.variables["skredomr19_km"][:]
        print("Missing value: {mv}".format(mv=_vr.variables["skredomr19_km"].missing_value))

    _region_bounds = np.where(_regions == region_id)  # just to get the bounding box

    # get the lower left and upper right corner of a rectangle around the region
    y_min, y_max, x_min, x_max = min(_region_bounds[0].flatten()), max(_region_bounds[0].flatten()), \
                                 min(_region_bounds[1].flatten()), max(_region_bounds[1].flatten())

    #reg_mask = np.ma.masked_where(_regions[y_min:y_max, x_min:x_max] == region_id, _regions[y_min:y_max, x_min:x_max]).mask
    #reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, _regions[y_min:y_max, x_min:x_max], np.nan)
    reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, 1., np.nan)
    #reg_mask = np.ma.masked_where(_reg_mask == region_id).mask
    _vr.close()

    if return_regions:
        return _regions, reg_mask, y_min, y_max, x_min, x_max
    else:
        return reg_mask, y_min, y_max, x_min, x_max
Example #20
File: nc.py  Project: sarahheim/ncObjects
    def flagStats_single(self, fname):
        '''counter of all the primary and secondary flags

        '''
        import pandas as pd
        df = Dataset(fname, 'r')
        arr = [pd.Series({'time size': df['time'].size})]
        for vrbl in df.variables:
            if '_flagPrimary' in vrbl:
                counts = {}
                v = vrbl.split('_')[0]
                flagP = vrbl
                flagS = v+'_flagSecondary'
                for p in [1,2,3,4,9]:
                    counts[flagP+'.'+str(p)] = df[flagP][:].tolist().count(p)
                for s in [1,2,3]:
                    pAtsArr = df[flagP][np.isin(df[flagS][:], s)]
                    counts[flagS+'.'+str(s)+'.3'] = pAtsArr.tolist().count(3)
                    counts[flagS+'.'+str(s)+'.4'] = pAtsArr.tolist().count(4)
                arr.append(pd.Series(counts))
        df.close()
        return pd.concat(arr)
Example #21
def nbdry_grid_hack(grid_file, num_pts):

    # Read bathymetry and masks
    id = Dataset(grid_file, "a")
    h = id.variables["h"][:, :]
    mask_rho = id.variables["mask_rho"][:, :]
    mask_u = id.variables["mask_u"][:, :]
    mask_v = id.variables["mask_v"][:, :]
    mask_psi = id.variables["mask_psi"][:, :]

    # Loop over longitude
    for i in range(size(h, 1)):
        # Find the southernmost unmasked cell within "num_pts" of the
        # northern boundary and set all the points north of it to match
        found_pt = False
        for j in range(num_pts, -1, -1):
            if mask_rho[-j, i] == 1:
                if found_pt:
                    # Already found the right point
                    h[-j, i] = val
                else:
                    # This is the first unmasked point
                    found_pt = True
                    val = h[-j, i]

    # Save changes
    id.variables["h"][:, :] = h
    id.close()
Example #22
    def ReadFile(self):
        '''
        Read TCCON data. There is one file per station; if a station directory
        contains multiple files, only one result is kept.
        '''

        if self.FileList == []:
            return

        for file in self.FileList:
            ncFile = Dataset(file, 'r', format='NETCDF3_CLASSIC')  # 'NCETCDF4'
            ncTime = ncFile.variables['time'][:]
            xco2_ppm = ncFile.variables['xco2_ppm'][:]
            ncFile.close()
            # print file

        for i in range(len(ncTime)):
            seconds = ncTime[i] * 24 * 60 * 60
            strTime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(seconds))
            self.FileLine.append([strTime, xco2_ppm[i]])
        # print self.FileLine[0], self.FileLine[-1]

        title = ['time', 'xco2']
        dtype = ['S19'] + ['f4']
        ary = np.core.records.fromarrays(np.array(self.FileLine).transpose(),
            names=','.join(title),
            formats=','.join(dtype))
        condition = np.logical_and(ary['xco2'] > 0, ary['xco2'] < 600)
        self.FileData = ary[np.where(condition)]
Example #23
def monreduce(filein):
    ''' Averages the files down to a month.
        
        Creates a new temporary file per month.
    '''
    directory = '/group_workspaces/jasmin/hiresgw/mj07/monthly_means/'
    month = filein[:-7]
    create_temp_nc(month)
    # Get the 3 files for each month
    files = glob(directory+'temp_files/'+filein[-22:-7]+'??.temp.v.nc')
    fileblob = ''
    for filename in files:
        fileblob+=filename+','
    splitind = range(0,1024,128)
    pool = Pool(processes=8)
    TASKS = [(fileblob,n) for n in splitind]
    meansection = [pool.apply_async(splitmonthcalc, t) for t in TASKS]
    mean = np.concatenate((meansection[0].get(),meansection[1].get(),\
                        meansection[2].get(),meansection[3].get(),\
                        meansection[4].get(),meansection[5].get(), \
                        meansection[6].get(),meansection[7].get()), 2)
    print('done for %s' % month)
    # Save mean in file
    filename = directory+'temp_files/'+filein[-22:-7]+'.temp.v.nc'
    f = Dataset(filename,'a')
    u = f.variables['v']
    u[:] = mean[:]
    
    f.close()
Example #24
def open(filename):
    '''Import netCDF output file as OpenDrift object of correct class'''

    import os
    import logging
    import pydoc
    from netCDF4 import Dataset
    if not os.path.exists(filename):
        logging.info('File does not exist, trying to retrieve from URL')
        import urllib.request
        try:
            urllib.request.urlretrieve(filename, 'opendrift_tmp.nc')
            filename = 'opendrift_tmp.nc'
        except Exception:
            raise ValueError('%s does not exist' % filename)
    n = Dataset(filename)
    try:
        module_name = n.opendrift_module
        class_name = n.opendrift_class
    except:
        raise ValueError(filename + ' does not contain '
                         'necessary global attributes '
                         'opendrift_module and opendrift_class')
    n.close()

    cls = pydoc.locate(module_name + '.' + class_name)
    if cls is None:
        from models import oceandrift3D
        cls = oceandrift3D.OceanDrift3D
    o = cls()
    o.io_import_file(filename)
    logging.info('Returning ' + str(type(o)) + ' object')
    return o
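
A usage sketch for open(); the file name is a placeholder and is assumed to be an OpenDrift output file carrying the opendrift_module and opendrift_class attributes. Note that this helper shadows the builtin open() in the module that defines it.

# Hypothetical usage; 'drift_output.nc' is a placeholder OpenDrift output file.
o = open('drift_output.nc')
print(type(o))   # the reconstructed OpenDrift subclass, with the run re-imported
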
Example #25
    def __init__(self, coord_file="coordinates.nc"):
        ds = Dataset(coord_file)
        self.target_lons = ds.variables["glamt"][:]
        self.target_lats = ds.variables["gphit"][:]

        print("target lons shape = ", self.target_lons.shape)
        ds.close()
Example #26
def piomasReader(directory,month,years):
    """
    Reads piomas data for sea ice thickness over 1979-2015
    """
    
    ### Enter filename
    filename = 'piomas_regrid_sit_19792015.nc'   
    
    ### Month/Years extracted
    dateyr = now.year  
    datemo = datetime.date(dateyr,month+1,1).strftime('%B')
    
    yearsp = np.arange(1979,2016)
    yearmin = years.min()
    yearmax = years.max()
    yearnone = 2010
    yearslice = np.where((yearsp <= yearmax) & (yearsp >= yearmin) & \
                (yearsp != yearnone))[0]
    
    ### Retrieve data
    data = Dataset(directory + filename)
    latp = data.variables['lat'][:]
    lonp = data.variables['lon'][:]
    thk_p = data.variables['newthickness'][yearslice,month,:,:]
    data.close()
    
    print('Completed: PIOMAS data read (%s)!' % datemo)
    
    return latp,lonp,thk_p
Example #27
class FileBuffer(object):
    """ Class that encapsulates and manages deferred access to file data. """

    def __init__(self, filename, dimensions):
        self.filename = filename
        self.dimensions = dimensions  # Dict with dimension keys for file data
        self.dataset = None

    def __enter__(self):
        self.dataset = Dataset(str(self.filename), 'r', format="NETCDF4")
        return self

    def __exit__(self, type, value, traceback):
        self.dataset.close()

    @property
    def lon(self):
        lon = self.dataset[self.dimensions['lon']]
        return lon[0, :] if len(lon.shape) > 1 else lon[:]

    @property
    def lat(self):
        lat = self.dataset[self.dimensions['lat']]
        return lat[:, 0] if len(lat.shape) > 1 else lat[:]

    @property
    def data(self):
        if len(self.dataset[self.dimensions['data']].shape) == 3:
            return self.dataset[self.dimensions['data']][:, :, :]
        else:
            return self.dataset[self.dimensions['data']][:, 0, :, :]

    @property
    def time(self):
        if self.time_units is not None:
            dt = num2date(self.dataset[self.dimensions['time']][:],
                          self.time_units, self.calendar)
            dt -= num2date(0, self.time_units, self.calendar)
            return list(map(timedelta.total_seconds, dt))
        else:
            return self.dataset[self.dimensions['time']][:]

    @property
    def time_units(self):
        """ Derive time_units if the time dimension has units """
        try:
            return self.dataset[self.dimensions['time']].units
        except:
            try:
                return self.dataset[self.dimensions['time']].Unit
            except:
                return None

    @property
    def calendar(self):
        """ Derive calendar if the time dimension has calendar """
        try:
            return self.dataset[self.dimensions['time']].calendar
        except:
            return 'standard'
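
A usage sketch for FileBuffer; the file name and the dimension-name mapping are assumptions about a typical input file, not values from this example.

# Hypothetical usage; file name and dimension names are placeholders.
dims = {'lon': 'nav_lon', 'lat': 'nav_lat', 'time': 'time_counter', 'data': 'uo'}
with FileBuffer('ocean_u.nc', dims) as fb:
    print(fb.lon.shape, fb.lat.shape)
    field = fb.data      # the full field for the named data variable
    seconds = fb.time    # seconds relative to the file's time reference
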
Example #28
File: data_select.py  Project: guziy/GevFit
def get_indices_from_file(path = 'data/streamflows/hydrosheds_euler9/aex_discharge_1970_01_01_00_00.nc'):
    fpin = Dataset(path)
    vars = fpin.variables

    x, y = vars['x-index'][:], vars['y-index'][:]
    fpin.close()
    return x, y
Example #29
def mooring_2dvar(ncfile, level):
    """Standard EcoFOCI Mooring .nc files with two dimensional parameters as a function of time
    (such as ein - echo intensity
    Timestep of data is assumed to be in fractions of a day"""

    ###nc readin/out
    nchandle = Dataset(ncfile,'r')
    params = ['time', 'time2', 'depth','latitude', 'longitude', 'AGC1_1221']
    time = nchandle.variables[params[0]][:]
    time2 = nchandle.variables[params[1]][:]
    lat = nchandle.variables[params[3]][:]
    lon = nchandle.variables[params[4]][:]
    depth = nchandle.variables[params[2]][:]
    ncdata = nchandle.variables[params[5]][:,:,0,0]
    nchandle.close()
    
    dt = 1. #data is hourly
    time_base = 'hours'
    
    pytime = util.EPICdate2udunits(time, time2)
    
    xx = ncdata[:,level]
    
    dt = 24. * (1. / pytime['interval_min']) #data is 4 times daily
    print(dt)
    time = pytime['timeint']

    variance = np.var(xx)
    #normalize
    print('Variance = %s ' % variance)
    x = (xx - np.mean(xx)) / np.sqrt(variance)
    variance = np.var(x)

    return (ncdata, x,dt,np.array(time) * 24., variance, time_base, depth) 
Example #30
File: nc.py  Project: sarahheim/ncObjects
    def dataToNC(self, ncName, subset, lookup):
        """Take dataframe and put in netCDF (new file or append).
        Assumes there's a 'time' variable in data/ncfile

        .. note: run in conda environment log2ncEnv3, due to line: appDF = subset[~exist]

        :param str ncName: name of netCDF with filepath
        :param dataframe subset: dataframe to be added to netCDF file
        :param lookup: a variable that might be needed for createNCshell, can be empty/Null

        """
        if not os.path.isfile(ncName):
            ncfile = self.createNCshell(ncName, lookup)
        ncfile = Dataset(ncName, 'a', format='NETCDF4')
        timeLen = len(ncfile.variables['time'][:])

        ## Add the following: remove any entries from the subset that already exist!!!!!!!
        # exist = subset.epoch.isin(ncDep.variables['time'][:]) #
        subset['epochs'] = subset.index.values.astype('int64') // 10**9
        # exist  = subset.index.isin(epochs) #wrong previously
        # environment numpy (1.11) didn't have 'isin' module
        exist = np.in1d(subset['epochs'], ncfile.variables['time'][:])
        appDF = subset[~exist]

        if len(appDF) > 0: # else all times are already in nc
            # length should be the same for time & all attributes
            ncfile.variables['time'][timeLen:] = appDF['epochs'].values
            # ncfile.variables['time'][timeLen:] = subset.index.values.astype(np.int64) // 10**9
            for attr in self.attrArr:
                #atLen = len(ncfile.variables[attr][:])
                ncfile.variables[attr][timeLen:] = appDF[attr].values
                self.attrMinMax(ncfile, attr)
            self.NCtimeMeta(ncfile)

        ncfile.close()
Example #31
def getValuesFromGLORYS(glorysIndicies, myglorys2v3pathT, myglorys2v3pathS,
                        xynames, startYear, debug):

    print "Extracting values from GLORYS2V3 (func:getValuesFromGLORYS)"

    mypaths = [myglorys2v3pathT, myglorys2v3pathS]
    variables = ["votemper", "vosaline"]
    first = True
    counter = 0
    varCounter = 0

    for variable, datapath in zip(variables, mypaths):

        argument = "%s*.nc" % (datapath)
        allFiles = glob.glob(argument)
        allFiles.sort()
        allFilesFiltered = []

        # Remove files older than startYear (we only want 2009-2012)
        for afile in allFiles:
            (head, filename) = os.path.split(afile)
            l = filename.split('_')
            date = str(l[2])
            if (startYear <= int(date[0:4])):
                allFilesFiltered.append(afile)
        allFiles = []
        allFiles = allFilesFiltered
        allFiles.sort()

        if first:
            allValues = np.zeros(shape=(len(glorysIndicies), len(allFiles),
                                        len(variables)))
            allDates = []
            print "Sorting %s files found in GLORYS2V3 datadirectory" % (
                len(allFiles))
            first = False

        refdate = datetime.datetime(1948, 1, 1)

        for afile in allFiles:
            (head, filename) = os.path.split(afile)
            l = filename.split('_')
            date = str(l[2])
            mydate = datetime.datetime(int(date[0:4]), int(date[4:6]),
                                       int(date[6:8]))
            if (varCounter == 0):
                allDates.append(mydate)

            cdf = Dataset(afile)

            for station in range(len(glorysIndicies)):
                data = cdf.variables[variable][0, 0,
                                               int(glorysIndicies[station][0]),
                                               int(glorysIndicies[station][1])]
                if debug:
                    print "%s,%i : %s  - data for station (%s,%s) %s = > %s" % (
                        counter, varCounter, mydate,
                        int(glorysIndicies[station][0]),
                        int(glorysIndicies[station][1]), variable, data)
                allValues[station, counter, varCounter] = data
            cdf.close()
            counter += 1
        counter = 0
        varCounter += 1
    return allValues, allDates
Example #32
    def read(self, filename, load_lonlat=True):
        """Read the OSISAF SST netCDF formatet data (from Ifremer)"""
        LOG.debug("OSISAF netCDF file format...")

        self.file = Dataset(filename, 'r')

        self.fillheader()

        # SST (K):
        sst_data = self.file.variables['sea_surface_temperature']

        sstdata = sst_data[0]
        self.sst = InfoObject()
        # For some strange reason the array seem to start in the lower left!?
        self.sst.data = sstdata[::-1]
        self.sst.info = self.get_data_header(self.sst.info, sst_data)
        self._projectables.append('sst')

        # dtime:
        dtime = self.file.variables['sst_dtime']
        dtime_data = dtime[0] * dtime.scale_factor + dtime.add_offset
        dtime_obj = InfoObject()
        dtime_obj.data = dtime_data[::-1]
        dtime_obj.info = self.get_data_header(dtime_obj.info, dtime)
        self.sec_1981 = dtime_obj.data + self.file.variables['time'][0]
        self.dtime = dtime_obj
        self._projectables.append('dtime')

        # DT_analysis (K): (SST Deviation from previous day)
        dta = self.file.variables['dt_analysis']
        gain = 0.1
        nodata = 255
        offset = -12.7
        data = dta[0] * dta.scale_factor + dta.add_offset
        valid_min = dta.valid_min
        valid_max = dta.valid_max

        dt_data = np.where(
            np.logical_and(np.greater(dta[0], valid_min),
                           np.less(dta[0], valid_max)), (data - offset) / gain,
            nodata).astype('B')
        dt = InfoObject()
        dt.data = dt_data[::-1]
        dt.info = self.get_data_header(dt.info, dta)
        dt.info["nodata"] = nodata
        dt.info["gain"] = gain
        dt.info["offset"] = offset
        self.dt = dt
        self._projectables.append('dt')

        # Bias:
        bias = self.file.variables['sses_bias']
        gain = 0.01
        offset = -1.27
        nodata = 255
        x = bias[0] * bias.scale_factor + bias.add_offset
        valid_min = bias.valid_min
        valid_max = bias.valid_max
        bias_data = np.where(
            np.logical_and(np.greater(bias[0], valid_min),
                           np.less(bias[0], valid_max)), (x - offset) / gain,
            nodata).astype('B')

        bias_obj = InfoObject()
        bias_obj.data = bias_data[::-1]
        bias_obj.info = self.get_data_header(bias_obj.info, bias)
        bias_obj.info["nodata"] = nodata
        bias_obj.info["gain"] = gain
        bias_obj.info["offset"] = offset
        self.bias = bias_obj
        self._projectables.append('bias')

        # Standard deviation:
        stdv = self.file.variables['sses_standard_deviation']
        gain = 0.01
        offset = 0.0
        nodata = 255
        x = stdv[0] * stdv.scale_factor + stdv.add_offset
        valid_min = stdv.valid_min
        valid_max = stdv.valid_max
        stdv_data = np.where(
            np.logical_and(np.greater(stdv[0], valid_min),
                           np.less(stdv[0], valid_max)), (x - offset) / gain,
            nodata).astype('B')

        stdv_obj = InfoObject()
        stdv_obj.data = stdv_data[::-1]
        stdv_obj.info = self.get_data_header(stdv_obj.info, stdv)
        stdv_obj.info["nodata"] = nodata
        stdv_obj.info["gain"] = gain
        stdv_obj.info["offset"] = offset
        self.stdv = stdv_obj
        self._projectables.append('stdv')

        # L2P flags:
        l2pf = self.file.variables['l2p_flags'][0]
        l2pf_obj = InfoObject()
        l2pf_obj.data = l2pf[::-1]
        l2pf_obj.info = self.get_data_header(l2pf_obj.info, l2pf)
        self.l2pf = l2pf_obj
        self._projectables.append('l2pf')

        # Longitudes:
        lon = self.file.variables['lon']
        self.lon = InfoObject()
        self.lon.data = lon[::-1].astype('f')
        self.lon.info = self.get_data_header(self.lon.info, lon)

        # Latitudes:
        lat = self.file.variables['lat']
        self.lat = InfoObject()
        self.lat.data = lat[::-1].astype('f')
        self.lat.info = self.get_data_header(self.lat.info, lat)

        return
Example #33
from netCDF4 import Dataset
import numpy as np
import pandas as pd
import stipolate as st
import helpers as hh
import mapping as mp


D = pd.HDFStore('../data/station_data.h5')
S = pd.HDFStore('../data/LinearLinear.h5')

T = hh.extract(D['ta_c'],'prom',1)
Tm = S['T2']
b = Tm['d02']-T

nc = Dataset('../data/wrf/fx.nc')
ma = mp.basemap(nc)
nc.close()
nc = Dataset('../data/wrf/d02_2014-09-10.nc')



p = {
	'hfx': st.interp_nc(nc,'HFX',sta,map=ma),
	'qfx': st.interp_nc(nc,'QFX',sta,map=ma),
	'gfx': st.interp_nc(nc,'GRDFLX',sta,map=ma),
	'res': st.interp_nc(nc,'NOAHRES',sta,map=ma)
}

# r2 = np.zeros((3,5))
# c = np.zeros((3,2))
Example #34
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset

#setting
clearthreshold = 296.0

#file
rawB03name = 'H08_B03_Indonesia_201706090500.nc'
rawB04name = 'H08_B04_Indonesia_201706090500.nc'
rawB13name = 'H08_B13_Indonesia_201706090500.nc'

#######################
#read raw data session#
#######################

dsetB03 = Dataset(rawB03name, mode='r')
dsetB04 = Dataset(rawB04name, mode='r')
dsetB13 = Dataset(rawB13name, mode='r')

#get lat and lon
lat = dsetB03.variables['latitude'][:]
lon = dsetB03.variables['longitude'][:]

#get data
B03 = dsetB03.variables['VS'][0]
B04 = dsetB04.variables['N1'][0]
B13 = dsetB13.variables['IR'][0]

###################
#calculate session#
###################
Example #35
cv_nc = sys.argv[2]
ciext = sys.argv[3]

imult = 1
if narg == 5: imult = int(sys.argv[4])

print(imult)

cfname, cncext = os.path.splitext(cf_nc)

cf_im = str.replace(os.path.basename(cf_nc), cncext, '.' + ciext)

print(' *** Will create image ' + cf_im)

# Reading data array:
f_nc = Dataset(cf_nc)
Ndim = len(f_nc.variables[cv_nc].dimensions)
if Ndim == 4:
    xfield = imult * f_nc.variables[cv_nc][0, 0, :, :]
elif Ndim == 3:
    xfield = imult * f_nc.variables[cv_nc][0, :, :]
elif Ndim == 2:
    xfield = imult * f_nc.variables[cv_nc][:, :]
else:
    print(' ERROR (mk_zonal_average.py) => weird shape for your mask array!')
    sys.exit(0)
#xfield  = imult*f_nc.variables[cv_nc][:,:]
f_nc.close()

(ny, nx) = nmp.shape(xfield)
Example #36
import warnings
import os
import glob
import conda
import numpy as np
from netCDF4 import Dataset

conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib

from mpl_toolkits.basemap import Basemap

files_rot = glob.glob('*.netcdf')
files_radar = glob.glob('*.nc')

radar_file = 'nexrad_3d_v4_0_20170827T222000Z.nc'
nc = Dataset(radar_file, 'r')
radarlat = nc.variables['Latitude'][:]
radarlon = nc.variables['Longitude'][:] - 360
lat2, lon2 = np.meshgrid(radarlat, radarlon)

zh = np.ones((288, 528, 672)) * np.nan
zdr = np.ones((288, 528, 672)) * np.nan
kdp = np.ones((288, 528, 672)) * np.nan

for n, j in enumerate(files_radar):
    data = read_file(j)
    print(j)
    zdr[n, :, :] = data['zdr']['values'][4, :, :]
    kdp[n, :, :] = data['kdp']['values'][4, :, :]
    zh[n, :, :] = data['Z_H']['values'][4, :, :]
Example #37
for fn in sorted(glob.glob(dir+'/objects_*.nc')):

    print(fn, flush=True)

    out_str = fn[-24:-3]
    fout = (out_dir + '/lp_objects_' + out_str + '__map.png')

    ## Set up figure
    fig = plt.figure(figsize=(11,8))
    ax1 = fig.add_subplot(111)



    ## Read
    DS = Dataset(fn)

    lon = DS['lon'][:]
    lat = DS['lat'][:]
    time = DS['time'][:]
    area = DS['area'][:] / (1e5)

    ## Plot
    # set up orthographic map projection with
    # perspective of satellite looking down at 50N, 100W.
    # use low resolution coastlines.
    #map = Basemap(projection='hammer',lon_0=180)
    map = Basemap(llcrnrlon=0.,llcrnrlat=-50.,urcrnrlon=360.,urcrnrlat=50.,\
                resolution='l',projection='merc')
    # draw coastlines, country boundaries, fill continents.
    map.fillcontinents(color='lightgray',zorder=1)
Example #38
# ---> outputs
method = 'xhi'
folder_save = cpso_path
save_plot = True
save_netcdf = True
outfile = folder_save + "/pdf_error_fun_" + method + str(nruns) + ".nc"
timing = True
figname = folder_save + '/' + 'pdf_error_fun_' + method + str(nruns) + '.png'

# --- create directory to save plots
if not os.path.exists(folder_save):
    os.makedirs(folder_save)

# --- load data
nc = Dataset(pdf_file)
f_grid = np.array(nc.variables['f_grid'][:nmodels_cut])
m_grid = np.array(nc.variables['m_grid'][:nmodels_cut, :])
pdf_m = np.array(nc.variables['pdf_m'][:, :])
m_synth = np.array(nc.variables['m_synth'][:])
ndata = nc.ndata
upper = nc.upper
lower = nc.lower
nc.close()

n_inter = pdf_m.shape[1]
#---------------------------------------------------------------------------
# ---> compute uncertainties

pdf_error = pp.pdf_std(m_grid,
                       f_grid,
Example #39
frozenPrecipMultip        |       1.0000 |       0.5000 |       1.5000

! radiation transfer within snow
! ====================================================================
Frad_direct ***               |       0.7000 |       0.0000 |       1.0000
Frad_vis    ***              |       0.5000 |       0.0000 |       1.0000

newSnowDenMin             |     100.0000 |      50.0000 |     100.0000
newSnowDenScal            |       5.0000 |       1.0000 |       5.0000
constSnowDen              |     100.0000 |      50.0000 |     250.0000
newSnowDenAdd             |     109.0000 |      80.0000 |     120.0000
newSnowDenMultTemp        |       6.0000 |       1.0000 |      12.0000
newSnowDenMultWind        |      26.0000 |      16.0000 |      36.0000
newSnowDenMultAnd         |       1.0000 |       1.0000 |       3.0000

paramfile = Dataset("C:/1UNRuniversityFolder/Dissertation/Chapter 1-Snowmelt/swamp_angel/vegImpact/summa_zParamTrial_variableDecayRate_sa_vegImpact.nc",'w',format='NETCDF3_CLASSIC') #create new paramtrail.nc file

hruidxID = list(np.arange(101,113))
hru_num = np.size(hruidxID)

#%% #create new paramtrail.nc file and adding vaiables to it --- summa_zParamTrial_variableDecayRate_test
hru = paramfile.createDimension('hru', None)
hidx = paramfile.createVariable('hruIndex', np.float64,('hru',)) # add hruIndex variable

param_nam_list = ['LAIMIN','LAIMAX','winterSAI','summerLAI','rootingDepth','heightCanopyTop','heightCanopyBottom',
                  'throughfallScaleSnow','newSnowDenMin','albedoDecayRate','albedoMaxVisible','albedoMinVisible', 
                  'albedoMaxNearIR', 'albedoMinNearIR','z0Snow', 'albedoRefresh', 'mw_exp','refInterceptCapSnow']#, 'fixedThermalCond_snow'] 
# call the function on the parameters
valst1 = param_fill(p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,p13,p14,p15,p16,p17,p18)  

for param in param_nam_list:
Example #40
# FOR TESTING ONLY: REMOVE WHEN RUNNING FINAL JOB
#filelist = [fname for fname in filelist[:1000]]

report("Using %d processes" % numProcs)
report("Writing variable %s" % varname)
report("Found %d input files, starting to open" % len(filelist))

assert( len(filelist) % numProcs == 0)

# open all the files associated with this process and keep handles around (avoid metadata costs)
myfiles = [fname for (index, fname) in enumerate(filelist) if (index % numProcs == rank)]
numFilesPerProc = len(myfiles)
myhandles = [None]*len(myfiles)
for (idx, fname) in enumerate(myfiles):
    myhandles[idx] = Dataset(join(datapath, fname), "r") 

reportbarrier("Finished opening all files")

#varnames = ["T", "U", "V", "Q", "Z3"]
#varnames = ["T"]
varnames = [varname]
numvars = len(varnames)
numvars = 1
numtimeslices = 8
numlevels = 15
#numlevels = 30
numlats = 768
numlongs = 1152
numlevdivs = 64
flattenedlength = numlevels*numlats*numlongs
Example #41
    day_upper = 31
    hour_lower = 0
    hour_upper = 23
    minute_lower = 0
    minute_upper = 45

    time_params = np.array([
        year_lower, year_upper, month_lower, month_upper, day_lower, day_upper,
        hour_lower, hour_upper, minute_lower, minute_upper
    ])

    datetimes = utilities.get_datetime_objects(time_params)
    datestrings = [j.strftime("%Y%m%d%H%M") for j in datetimes]

    lonlats = Dataset(
        '/ouce-home/data/satellite/meteosat/seviri/15-min/native/'
        'lonlats.NA_MiddleEast.nc')

    # These need to be regridded to regular for consistency with cloud mask
    lons = lonlats.variables['longitude'][:]
    lats = lonlats.variables['latitude'][:]
    lonmask = lons > 360
    latmask = lats > 90
    lons = np.ma.array(lons, mask=lonmask)
    lats = np.ma.array(lats, mask=latmask)
    sdf_previous = None
    clouds_previous = None
    ids_previous = []
    cloud_ids_previous = []
    deep_conv_IDs_prev = None
    LLJ_plumes_IDs_prev = []
Example #42
                 '.004']  # "" for initial run, ".001" for first cycle, etc.
cycle_num = len(cycle_no_list)

var = 'u'
varName = r'$\mathrm{\rho_{uu}}$'
varUnit = ''
varName_save = 'uu_corr'

# read the output data of all cycle_no_list
nc_file_list = []
tSeq_list = []
varSeq_list = []
for i in range(cycle_num):
    input_file = prjDir + '/' + jobName + suffix + "/OUTPUT/" + jobName + suffix + "_masked_" + maskid + cycle_no_list[
        i] + ".nc"
    nc_file_list.append(Dataset(input_file, "r", format="NETCDF4"))
    tSeq_list.append(
        np.array(nc_file_list[i].variables['time'][:],
                 dtype=type(nc_file_list[i].variables['time'])))
    varSeq_list.append(
        np.array(nc_file_list[i].variables[var][:],
                 dtype=type(nc_file_list[i].variables[var])))

# print(list(nc_file_list[0].dimensions)) #list all dimensions
# print(list(nc_file_list[0].variables)) #list all the variables
# print(list(nc_file_list[0].variables['u2'].dimensions)) #list dimensions of a specified variable

# extract the values of all dimensions of the var
zName = list(
    nc_file_list[0].variables[var].dimensions)[1]  # the height name string
zSeq = np.array(
Example #43
 def setUp(self):
     self.files = files
     for nfile, file in enumerate(self.files):
         f = Dataset(file, 'w', format='NETCDF4_CLASSIC')
         #f.createDimension('x',None)
         f.createDimension('x', ninc)
         f.createDimension('y', ydim)
         f.createDimension('z', zdim)
         f.history = 'created today'
         x = f.createVariable('x', 'i', ('x', ))
         x.units = 'zlotys'
         dat = f.createVariable('data', 'i', (
             'x',
             'y',
             'z',
         ))
         dat.long_name = 'phony data'
         dat.missing_value = missval
         nx1 = int(nfile * ninc)
         nx2 = int(ninc * (nfile + 1))
         #x[0:ninc] = np.arange(nfile*ninc,ninc*(nfile+1))
         x[:] = np.arange(nfile * ninc, ninc * (nfile + 1))
         #dat[0:ninc] = data[nx1:nx2]
         dat[:] = data[nx1:nx2]
         f.close()
Example #44
    'ya': lat1,
    'xb': lon2,
    'yb': lat2
})

bdy_map = 100 / 110000
limits = [
    pin_df[['ya', 'yb']].values.min() - bdy_map,
    pin_df[['ya', 'yb']].values.max() + bdy_map,
    pin_df[['xa', 'xb']].values.min() - bdy_map,
    pin_df[['xa', 'xb']].values.max() + bdy_map
]

# Opening wrfinput

inp = Dataset('wrfinput_d02', 'r')
xlat = inp.variables['XLAT'][:]
xlong = inp.variables['XLONG'][:]
t2 = inp.variables['T2'][:]

inp.close()

lats = [xlat.min(), xlat.max()]
lons = [xlong.min(), xlong.max()]

lat = xlat[0, :, 0]
lon = xlong[0, 0, :]
lo, la = np.meshgrid(lon, lat)

# Pinheiros cetesb
Example #45
import time
import numpy as np
import os
from netCDF4 import Dataset

# Get time information
download_stamp = time.strftime('%Y-%m-%d',
                               time.localtime(os.path.getmtime("fBNF.nc")))
generate_stamp = time.strftime('%Y-%m-%d')

# Extract information from the original file
dset = Dataset("fBNF.nc")
y0, yf = dset.variables["Time"][[0, -1]] + 1979
tb = (np.asarray([[y0, yf + 1]]) - 1850.) * 365
t = tb.mean(axis=1)
lat = dset.variables["latitude"][...]
lon = dset.variables["longitude"][...]
data = dset.variables["fBNF"][0, ...]
data.shape = (1, ) + data.shape

# We will use the 1q and 3q values to define the 'bounds' of the data,
# ILAMB will use this as a measure of uncertainty
data_bnds = np.ma.masked_array(np.zeros(data.shape + (2, )), mask=False)
data_bnds[0, ..., 0] = dset.variables["fBNF_1q"][0, ...]
data_bnds[0, ..., 1] = dset.variables["fBNF_3q"][0, ...]

# NOTE: the 1q and 3q values are currently set to the same values as fBNF
print("fBNF    == fBNF_1q (%s)" % np.allclose(data, data_bnds[..., 0]))
print("fBNF_1q == fBNF_3q (%s)" %
      np.allclose(data_bnds[..., 1], data_bnds[..., 0]))
Example #46
    def setUp(self):

        self.files = [tempfile.mktemp(".nc") for nfile in range(2)]
        for nfile, file in enumerate(self.files):
            f = Dataset(file, 'w', format='NETCDF4_CLASSIC')
            f.createDimension('time', None)
            f.createDimension('y', ydim)
            f.createDimension('z', zdim)
            f.history = 'created today'

            time = f.createVariable('time', 'f', ('time', ))
            #time.units = 'days since {0}-01-01'.format(1979+nfile)
            yr = 1979 + nfile
            time.units = 'days since %s-01-01' % yr

            time.calendar = 'standard'

            x = f.createVariable('x', 'f', ('time', 'y', 'z'))
            x.units = 'potatoes per square mile'

            nx1 = self.ninc * nfile
            nx2 = self.ninc * (nfile + 1)

            time[:] = np.arange(self.ninc)
            x[:] = np.arange(nx1, nx2).reshape(self.ninc, 1, 1) * np.ones(
                (1, ydim, zdim))

            f.close()
Example #47
annee = 2019

# in batch mode, without display
#matplotlib.use('Agg')  

file = 'OS_PIRATA-FR29_TSG.nc'
path_clim = '../climato/'
clim = 'isas13_monthly_surf.nc'
ncpath = '.'
path = 'png'

ncfile = os.path.join(ncpath, file)
nc = Dataset(ncfile, mode='r')

SSPS = nc.variables['SSPS']
SSTP = nc.variables['SSTP']
TIME = nc.variables['TIME']
CM = nc.cycle_mesure
LON = nc.variables['LONGITUDE']
LAT = nc.variables['LATITUDE']


#definition of the day for the current year and the start year of netcdf
df = datetime.datetime(annee, 1, 1, 0)
dd = datetime.datetime(1950, 1, 1, 0)
#convert day to julian day real
jul = pyasl.jdcnv(df)
jul2 = pyasl.jdcnv(dd)
Example #48
from netCDF4 import Dataset as NetCDFFile

parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-f", "--file", dest="filename", help="Path to grid file", metavar="FILE", required=True)
parser.add_argument("-w", "--weights", dest="weight_field", help="Field to weight block partition file on.", metavar="VAR")

args = parser.parse_args()

if not args.weight_field:
	print "Weight field missing. Defaulting to unweighted graphs."
	weighted_parts = False
else:
	weighted_parts = True

dev_null = open(os.devnull, 'w')
grid = NetCDFFile(args.filename, 'r')

nCells = len(grid.dimensions['nCells'])
nEdges = len(grid.dimensions['nEdges'])

nEdgesOnCell = grid.variables['nEdgesOnCell'][:]
cellsOnCell = grid.variables['cellsOnCell'][:] - 1
if weighted_parts:
	try:
		weights = grid.variables[args.weight_field][:]
	except KeyError:
		print(args.weight_field, 'not found in file. Defaulting to un-weighted partitions.')
		weighted_parts = False
grid.close()

nEdges = 0
Example #49
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset

# --------------
# User settings
# --------------

romsfile = './data/ocean_avg_example.nc'
tstep = 3  # 4-th time frame

# -------------------
# Extract the data
# -------------------

fid = Dataset(romsfile)
sst = fid.variables['temp'][tstep, -1, :, :]
M = fid.variables['mask_rho'][:, :]
fid.close()

# --------------
# Data handling
# --------------

# Mask out the SST data with NaN on land
# to improve the contour plot
sst[M < 1] = np.NaN

# -------------
# Plot
# -------------
Example #50
def get_hydro(my_file, count):
    f = Dataset(my_file, mode='r')

    while True:
        try:
            #x = int(input("Please enter a number: "))
            eps = f.variables['EPSILON'][:]
            eps = remove_bad_eps(eps)
            lat = f.variables['LATITUDE'][:]
            lon = f.variables['LONGITUDE'][:]
            p = f.variables['PRESSURE'][:]
            SP = f.variables['PSAL'][:]
            T = f.variables['TEMPERATURE'][:]
            break
        except KeyError:
            #print("Variables don't match")
            #print(my_file)

            if ((my_file.split('/'))[7]).split('_')[0] == 'FjordEco':
                eps = f.variables['epsilon'][:]
                lat = f.variables['lat'][:]
                lon = f.variables['lon'][:]
                p = f.variables['pressure'][:]
                SP = f.variables['S'][:]
                T = f.variables['T'][:]
                pn = np.zeros(np.shape(T))
                for i in range(0, np.shape(T)[0]):
                    pn[i, :] = p[:]
                #print(np.shape(p),np.shape(T))

                # add function for size of files! If 2d, make 1d:
                eps = eps.flatten('F')
                lat = lat.flatten('F')
                lon = lon.flatten('F')
                p = pn.flatten('F')
                SP = SP.flatten('F')
                T = T.flatten('F')

                eps = remove_bad_eps(eps)

            if ((my_file.split('/'))[7]).split('_')[0] == 'Samoan':
                eps = f.variables['epsilon'][:]
                #lat = f.variables['lat'][:]
                #lon = f.variables['lon'][:]
                p = f.variables['pressure'][:]
                SP = f.variables['salinity'][:]
                T = f.variables['temperature'][:]
                # MANUALLY ADD LAT LON

            if ((my_file.split('/'))[7]).split('_')[0] == 'MIXET':
                eps = f.variables['epsilon'][:]
                #lat = f.variables['lat'][:]
                #lon = f.variables['lon'][:]
                p = f.variables['pressure'][:]
                SP = f.variables['salinity'][:]
                T = f.variables['temperature'][:]
                # MANUALLY ADD LAT LON

            if ((my_file.split('/'))[7]).split('_')[0] == 'EXITS1':
                eps = f.variables['epsilon'][:]
                eps = remove_bad_eps(eps)
                #lat = f.variables['LATITUDE'][:]
                #lon = f.variables['LONGITUDE'][:]
                p = f.variables['pressure'][:]
                SP = f.variables['salinity'][:]
                T = f.variables['temperature'][:]
                # MANUALLY ADD LAT LON

            if ((my_file.split('/'))[7]).split('_')[0] == 'EXITS2':
                eps = f.variables['epsilon'][:]
                eps = remove_bad_eps(eps)
                #lat = f.variables['LATITUDE'][:]
                #lon = f.variables['LONGITUDE'][:]
                p = f.variables['pressure'][:]
                SP = f.variables['salinity'][:]
                T = f.variables['temperature'][:]
                # MANUALLY ADD LAT LON

            if ((my_file.split('/'))[7]).split('_')[0] == 'EXITS3':
                eps = f.variables['epsilon'][:]
                eps = remove_bad_eps(eps)
                #lat = f.variables['LATITUDE'][:]
                #lon = f.variables['LONGITUDE'][:]
                p = f.variables['pressure'][:]
                SP = f.variables['salinity'][:]
                T = f.variables['temperature'][:]
                # MANUALLY ADD LAT LON

            break
    """
 eps = f.variables['EPSILON'][:]
 eps = remove_bad_eps( eps )
 lat = f.variables['LATITUDE'][:]
 lon = f.variables['LONGITUDE'][:]
 p = f.variables['PRESSURE'][:]
 SP = f.variables['PSAL'][:]
 T = f.variables['TEMPERATURE'][:]
 """

    #print(np.shape(eps),np.shape(lat),np.shape(p))
    z = gsw.z_from_p(p, lat)  # m
    SA = gsw.SA_from_SP(SP, p, lon, lat)  #  g/kg, absolute salinity
    CT = gsw.CT_from_t(SA, T, p)  # C, conservative temperature

    SA = remove_bad_SA(SA)
    CT = remove_bad_CT(CT)

    [N2_mid, p_mid] = gsw.Nsquared(SA, CT, p, lat)
    z_mid = gsw.z_from_p(p_mid, lat)

    N2 = interp_to_edges(N2_mid, z, z_mid, 4)
    #N2 = np.append(np.append([np.nan],N2),[np.nan])
    N2 = np.append(np.append(N2, [np.nan]), [np.nan])
    #eps_mid = interp_to_centers( eps , z_mid , z )
    #SA_mid = interp_to_centers( SA , z_mid , z )
    #CT_mid = interp_to_centers( CT , z_mid , z )

    N2 = remove_bad_N2(N2)
    """
 plotname = figure_path +'N2_%i.png' %(count)
 fig = plt.figure()
 plt.plot(N2_mid,z_mid,'r')#,label="computed")
 plt.plot(N2,z,'--b')#,label="computed")
 plt.axis('tight') #[-0.0001,0.0005,-35.,-23.])
 #plt.grid()
 plt.savefig(plotname,format="png"); plt.close(fig);

 plotname = figure_path +'SA_%i.png' %(count)
 fig = plt.figure()
 #plt.plot(SA_mid,z_mid,'b')#,label="computed")
 plt.plot(SA,z,'b')#,label="computed")
 plt.axis('tight') #[-0.0001,0.0005,-35.,-23.])
 #plt.grid()
 plt.savefig(plotname,format="png"); plt.close(fig);

 plotname = figure_path +'CT_%i.png' %(count)
 fig = plt.figure()
 #plt.plot(CT_mid,z_mid,'b')#,label="computed")
 plt.plot(CT,z,'b')#,label="computed")
 plt.axis('tight') #[-0.0001,0.0005,-35.,-23.])
 #plt.grid()
 plt.savefig(plotname,format="png"); plt.close(fig);

 plotname = figure_path +'eps_%i.png' %(count)
 fig = plt.figure()
 #plt.plot(eps_mid,z_mid,'b')#,label="computed")
 plt.semilogx(eps,z,'b')#,label="computed")
 plt.axis('tight') #[-0.0001,0.0005,-35.,-23.])
 #plt.grid()
 plt.savefig(plotname,format="png"); plt.close(fig);
 """

    f.close()
    return N2, SA, CT, eps, z
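
A minimal, self-contained sketch of the TEOS-10 (gsw) call chain used in the function above, run on a short synthetic profile; the values and the station position are illustrative, not taken from any of the campaigns.

import numpy as np
import gsw

p = np.array([10., 50., 100., 200., 500.])      # pressure, dbar
SP = np.array([35.1, 35.0, 34.9, 34.7, 34.5])   # practical salinity
t = np.array([25.0, 22.0, 18.0, 12.0, 8.0])     # in-situ temperature, deg C
lat, lon = -14.0, -170.0                         # assumed station position

z = gsw.z_from_p(p, lat)                      # height (m), negative below the surface
SA = gsw.SA_from_SP(SP, p, lon, lat)          # absolute salinity, g/kg
CT = gsw.CT_from_t(SA, t, p)                  # conservative temperature, deg C
N2_mid, p_mid = gsw.Nsquared(SA, CT, p, lat)  # buoyancy frequency squared at layer midpoints
z_mid = gsw.z_from_p(p_mid, lat)
print(N2_mid, z_mid)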
Example #51
0
def test_plot_GATE_III(sim_data):
    """
    plot GATE_III timeseries
    """
    # make directory
    localpath = os.getcwd()
    try:
        os.mkdir(localpath + "/plots/output/GATE_III/")
    except FileExistsError:
        print('GATE_III folder exists')
    try:
        os.mkdir(localpath + "/plots/output/GATE_III/all_variables/")
    except FileExistsError:
        print('GATE_III/all_variables folder exists')

    if (os.path.exists(localpath + "/les_data/GATE_III.nc")):
        les_data = Dataset(localpath + "/les_data/GATE_III.nc", 'r')
    else:
        # NOTE: this URL points at the TRMM_LBA LES file; the GATE_III link may need
        # to be substituted. The download is saved under the name that is opened below.
        url_ = "https://www.dropbox.com/s/snhxbzxt4btgiis/TRMM_LBA.nc?dl=0"
        os.system("wget -O " + localpath + "/les_data/GATE_III.nc " + url_)
        les_data = Dataset(localpath + "/les_data/GATE_III.nc", 'r')

    f1 = "plots/output/GATE_III/"
    f2 = f1 + "all_variables/"
    cn = "GATE_III_"
    t0 = 22
    t1 = 24
    zmin = 0.0
    zmax = 15.0
    cb_min = [0, 0] #TODO
    cb_max = [1, 1] #TODO
    fixed_cbar = True
    cb_min_t = [290, 290, 294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
                -0.4, 0, -10, -6,\
                0, -0.06, 0,\
                -0.12, -0.12, -0.15,\
                -.05, -0.35, -0.15]
    cb_max_t = [370, 370, 348, 0.04, 0.004, 0.28, 0.06, 0.06, 100, 0.06, 100, 20, 20, 20,\
                0, 9, 6, 10,\
                0.24, 0.04, 1.75,\
                0.12, 0.12, 0.3,\
                0.35, 0.05, 0.5]

    scm_dict = cmn.read_scm_data(sim_data)
    les_dict = cmn.read_les_data(les_data)

    scm_dict_t = cmn.read_scm_data_timeseries(sim_data)
    les_dict_t = cmn.read_les_data_timeseries(les_data)

    pls.plot_closures(scm_dict, les_dict, t0, t1, zmin, zmax, cn+"closures.pdf", folder=f1)
    pls.plot_spec_hum(scm_dict, les_dict, t0, t1, zmin, zmax,  cn+"humidities.pdf", folder=f1)
    pls.plot_upd_prop(scm_dict, les_dict, t0, t1, zmin, zmax,  cn+"updraft_properties.pdf", folder=f1)
    pls.plot_fluxes(scm_dict, les_dict, t0, t1, zmin, zmax,  cn+"mean_fluxes.pdf", folder=f1)
    pls.plot_tke_comp(scm_dict, les_dict, t0, t1, zmin, zmax,  cn+"tke_components.pdf", folder=f1)

    pls.plot_cvar_mean(scm_dict, les_dict, t0, t1, zmin, zmax,  cn+"var_covar_mean.pdf", folder=f2)
    pls.plot_cvar_comp(scm_dict, t0, t1, zmin, zmax,  cn+"var_covar_components.pdf", folder=f2)
    pls.plot_tke_break(scm_dict, les_dict, t0, t1, zmin, zmax, cn+"tke_breakdown.pdf",folder=f2)

    pls.plot_contour_t(scm_dict, les_dict, fixed_cbar, cb_min_t, cb_max_t, zmin, zmax, folder=f2)
    pls.plot_mean_prof(scm_dict, les_dict, t0, t1,  zmin, zmax, folder=f2)

    pls.plot_main(scm_dict_t, les_dict_t, scm_dict, les_dict,
                  cn+"main_timeseries.pdf", cb_min, cb_max, zmin, zmax, folder=f1)

    pls.plot_1D(scm_dict_t, les_dict_t, cn, folder=f2)
Example #52
0
            nc_var = nc_data.createVariable(
                '/' + '/'.join(hierarchy) + '/' + json_data['name'],
                json_data['datatype'], tuple(json_data['dimensions']))
        else:
            nc_var = nc_data.createVariable(
                '/' + '/'.join(hierarchy) + '/' + json_data['name'],
                json_data['datatype'])
        # Does the variable have attributes?
        if 'attributes' in json_data:
            for attribute in json_data['attributes']:
                setattr(nc_var, attribute['name'], attribute['value'])
        # Put the data into the newly created variable
        if 'data' in json_data:
            nc_var[:] = json_data['data']


# Input file
data_filepath = sys.argv[1] if len(sys.argv) > 1 else 'data.json'
nc_filepath = sys.argv[2] if len(sys.argv) > 2 else 'data.nc'

# Create the data file and parse
with open(data_filepath) as data_file:
    json_document = json.load(data_file)
base_dir = os.path.dirname(data_filepath)
if base_dir != "":
    base_dir = base_dir + "/"

nc_data = Dataset(nc_filepath, 'w')
parse(json_document, nc_data)
nc_data.close()
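
For reference, the parse() helper above looks for 'name', 'datatype', and optional 'dimensions', 'attributes', and 'data' keys on each variable entry; a hedged sketch of one such entry (field names taken from the code, values and file name illustrative, and the enclosing group/hierarchy layout not shown):

import json

example_variable = {
    "name": "temperature",
    "datatype": "f4",
    "dimensions": ["time"],
    "attributes": [{"name": "units", "value": "K"}],
    "data": [287.1, 287.4, 286.9],
}
with open("example_variable.json", "w") as fh:
    json.dump(example_variable, fh, indent=2)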
Example #53
0
                j))

list_of_dir.sort()
list_of_files.sort()

list_of_states = [
    'az', 'id', 'mo', 'ny', 'or', 'col', 'az', 'id', 'mo', 'ny', 'or', 'col',
    'az', 'id', 'mo', 'ny', 'or', 'col'
]
list_of_states.sort()

# Loop through the lists to create the csv for each stream, in each resolution.
for file, state in zip(list_of_files, list_of_states):

    # Call the NetCDF file.
    nc = Dataset(file)
    nc.variables.keys()
    nc.dimensions.keys()

    # Define variables from the NetCDF file.
    riv = nc.variables['rivid'][:].tolist()
    lat = nc.variables['lat'][:]
    lon = nc.variables['lon'][:]
    avgQ = nc.variables['average_flow'][:]
    sQ = nc.variables['std_dev_flow'][:]
    maxQ = nc.variables['max_flow'][:]
    minQ = nc.variables['min_flow'][:]

    # Make a list of each day in the year.
    dates = pd.date_range('2014-01-01', '2014-12-31').strftime('%b %d')
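
The rest of the loop is not shown; a hedged sketch of how the daily series for each reach might be assembled into the per-stream CSV the comment describes, assuming the flow variables are dimensioned (rivid, day-of-year); the output file naming is illustrative:

import pandas as pd

for i, rivid in enumerate(riv):
    df = pd.DataFrame({
        'date': dates,
        'average_flow': avgQ[i, :],
        'std_dev_flow': sQ[i, :],
        'max_flow': maxQ[i, :],
        'min_flow': minQ[i, :],
    })
    df.to_csv('{}_{}.csv'.format(state, rivid), index=False)  # illustrative file name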
Example #54
0
# Dimensions in a netCDF file.

from netCDF4 import Dataset
rootgrp = Dataset("test.nc", "a")
fcstgrp = rootgrp.createGroup("forecasts")
analgrp = rootgrp.createGroup("analyses")

print(rootgrp.groups)

level = rootgrp.createDimension("level", None)
time = rootgrp.createDimension("time", None)
lat = rootgrp.createDimension("lat", 73)
lon = rootgrp.createDimension("lon", 144)

print(rootgrp.dimensions)

print(len(lon))
print(lon.isunlimited())
print(time.isunlimited())

for dimobj in rootgrp.dimensions.values():
    print(dimobj)
##########################################################################################
##########################################################################################

season_names = ['djf', 'mam', 'jja', 'son', 'ondjfm']

season = 'djf'
#season='mam'
#season='jja'
#season='son'
#season='annual'

# ua_corr_lat_lo, ua_corr_lat_hi, ua_corr_lon_lo, ua_corr_lon_hi = 30,40,170.,205.
# ts_corr_lat_lo, ts_corr_lat_hi, ts_corr_lon_lo, ts_corr_lon_hi = -25,-15,200.,230.

ncfile = Dataset(
    '/ninod/baird/cmip5/obs/ERSSTv4/sst.mnmean.v4_invertlat_72x144regrid.nc',
    'r',
    format='NETCDF4')
sst_data_orig = ncfile.variables['sst'][:]
sst_lat = ncfile.variables['lat'][:]
sst_lon = ncfile.variables['lon'][:]

global_nlat, global_nlon = sst_data_orig.shape[1:3]
global_lat_vals = sst_lat[:]
global_lon_vals = sst_lon[:]

##########################################################################################
##########################################################################################
##########################################################################################

# (YYYY,MM,DD)
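
The seasonal compositing that follows is not shown; a minimal sketch of forming a seasonal-mean SST field from the monthly array read above, assuming the record starts in January:

import numpy as np

n_months = sst_data_orig.shape[0]
months = np.arange(n_months) % 12 + 1      # calendar month 1..12, assuming a January start
season_months = {'djf': (12, 1, 2), 'mam': (3, 4, 5), 'jja': (6, 7, 8),
                 'son': (9, 10, 11), 'ondjfm': (10, 11, 12, 1, 2, 3)}
season_mask = np.isin(months, season_months[season])
sst_season_mean = sst_data_orig[season_mask].mean(axis=0)   # (lat, lon) seasonal climatology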
Example #56
0
    exit(2)

# read specmap.017
print("reading data from ", SPEC_FILE)
try:
    years_sea, d18Osea = loadtxt(SPEC_FILE, skiprows=2, unpack=True)
except IOError:
    print('ERROR: File: ' + SPEC_FILE + ' could not be found.')
    exit(2)
years_sea *= 1000.0
# compute sea level from \delta 18^O; see C. Ritz, 1997. "EISMINT
#   intercomparison experiment comparison of existing Greenland models"
dSea = -34.83 * (d18Osea + 1.93)

# open the nc for delta Sea Level file to write to
ncfile = NC(DSL_FILE, 'w', format='NETCDF3_CLASSIC')

# set global attributes
historysep = ' '
historystr = asctime() + ': ' + historysep.join(argv) + '\n'
setattr(ncfile, 'history', historystr)

# define time dimension, then time variable
Stdim = ncfile.createDimension('t', None)
Stvar = ncfile.createVariable('t', 'f4', ('t', ))
setattr(Stvar, 'units', 'years since 1-1-1')

# define climate data variables and attributes
d18Oseavar = ncfile.createVariable('delta_18_O', 'f4', ('t', ))
setattr(d18Oseavar, 'units', 'normalized O-18')  # see specmap_readme.txt
setattr(d18Oseavar, 'long_name',
Example #57
0
    def _init_dataset(self):
        # frmt = 'normal' if np.issubdtype(self.pcr_metadata['dtype'], np.unsignedinteger) else 'classic'
        self.frmt = self.nc_metadata.get('format', 'NETCDF4')
        self.nf = Dataset(self.name, 'w', format=self.frmt)
        self.nf.history = 'Created {}'.format(time.ctime(time.time()))
        self.nf.Conventions = 'CF-1.7'
        self.nf.Source_Software = 'JRC.E1 lisfloodutilities nexus wefe - pcr2nc'
        self.nf.source = self.nc_metadata.get('source')
        self.nf.reference = self.nc_metadata.get('reference')

        # Dimensions
        if self.is_mapstack:
            self.nf.createDimension('time', None)
        self.nf.createDimension('yc', self.pcr_metadata['rows'])
        self.nf.createDimension('xc', self.pcr_metadata['cols'])

        # define coordinates variables by calling one of the define_* functions
        datum_function = 'define_{}'.format(
            self.nc_metadata['geographical']['datum'].lower())
        post_datum_function = '{}_post'.format(datum_function)
        getattr(self, datum_function)()

        time_nc = None
        vardimensions = ('yc', 'xc')
        if self.is_mapstack:
            # time variable
            time_units = self.nc_metadata['time'].get('units', '')
            time_nc = self.nf.createVariable('time', 'f8', ('time', ))
            time_nc.standard_name = 'time'
            if str(self.hour) != '24':
                time_nc.units = '{} {}:00'.format(time_units,
                                                  str(self.hour).zfill(2))
            else:
                # observation is at 24h...need to rotate one day more
                start_date = datetime.datetime.strptime(
                    time_units[-10:], '%Y-%m-%d')  # 'days since 1996-01-01'
                start_date = start_date + datetime.timedelta(days=1)
                time_nc.units = 'days since {} 00:00'.format(
                    start_date.strftime('%Y-%m-%d'))
            time_nc.calendar = self.nc_metadata['time'].get(
                'calendar', 'proleptic_gregorian')
            vardimensions = ('time', 'yc', 'xc')

        # data variable
        complevel = self.nc_metadata['variable'].get('compression')
        additional_args = {'zlib': bool(complevel)}
        if complevel:
            print('Applying compression level', str(complevel))
            additional_args['complevel'] = complevel
        if np.issubdtype(self.pcr_metadata['dtype'], np.floating):
            additional_args['least_significant_digit'] = self.nc_metadata.get(
                'least_significant_digit', None)

        values_nc = self.nf.createVariable(
            self.nc_metadata['variable'].get('shortname', ''),
            self.pcr_metadata['dtype'], vardimensions, **additional_args)
        getattr(self, post_datum_function)(values_nc)

        values_nc.standard_name = self.nc_metadata['variable'].get(
            'shortname', '')
        values_nc.long_name = self.nc_metadata['variable'].get('longname', '')
        values_nc.units = self.nc_metadata['variable'].get('units', '')
        return time_nc, values_nc
Example #58
0
def main(nc_path_to_directions=""):
    ds = Dataset(nc_path_to_directions)

    margin = 20

    var_name = "accumulation_area"
    data = ds.variables[var_name][margin:-margin, margin:-margin]

    data = np.ma.masked_where(data <= 0, data)

    # flow directions
    fldr = ds.variables["flow_direction_value"][:][margin:-margin,
                                                   margin:-margin]
    lkfr = ds.variables["lake_fraction"][:][margin:-margin, margin:-margin]
    lkouts = ds.variables["lake_outlet"][:][margin:-margin, margin:-margin]

    lkids = calculate_lake_ids(fldr, lkfr, lkouts)

    # plotting
    i_shifts, j_shifts = direction_and_value.flowdir_values_to_shift(fldr)
    lons, lats = [
        ds.variables[key][margin:-margin, margin:-margin]
        for key in ["lon", "lat"]
    ]
    bsmap = gc.get_basemap(lons=lons, lats=lats)

    x, y = bsmap(lons, lats)
    fig = plt.figure(figsize=(15, 15))

    img = bsmap.pcolormesh(x, y, lkids)
    bsmap.colorbar(img)

    bsmap.pcolormesh(x, y, lkouts, cmap="gray_r")

    nx, ny = x.shape
    inds_j, inds_i = np.meshgrid(range(ny), range(nx))
    inds_i_next = inds_i + i_shifts
    inds_j_next = inds_j + j_shifts

    inds_i_next = np.ma.masked_where((inds_i_next == nx) | (inds_i_next == -1),
                                     inds_i_next)
    inds_j_next = np.ma.masked_where((inds_j_next == ny) | (inds_j_next == -1),
                                     inds_j_next)

    u = np.ma.masked_all_like(x)
    v = np.ma.masked_all_like(x)

    good = (~inds_i_next.mask) & (~inds_j_next.mask)
    u[good] = x[inds_i_next[good], inds_j_next[good]] - x[inds_i[good],
                                                          inds_j[good]]
    v[good] = y[inds_i_next[good], inds_j_next[good]] - y[inds_i[good],
                                                          inds_j[good]]

    bsmap.quiver(x,
                 y,
                 u,
                 v,
                 pivot="tail",
                 width=0.0005,
                 scale_units="xy",
                 headlength=20,
                 headwidth=15,
                 scale=1)

    bsmap.drawcoastlines(linewidth=0.5)

    bsmap.drawrivers(color="b")

    # plt.savefig(nc_path_to_directions[:-3] + "png", bbox_inches="tight")

    plt.show()
Example #59
0
class NetCDFWriter:
    """
    This class manages all aspects concerning definition and writing of a NetCDF4 file.
    """

    FORMATS = {'classic': 'NETCDF4_CLASSIC', 'normal': 'NETCDF4'}
    DATUM = {
        'ETRS89':
        'PROJCS["JRC_LAEA_ETRS-DEF",GEOGCS["GCS_ETRS_1989",DATUM["D_ETRS_1989",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Azimuthal_Equal_Area"],PARAMETER["False_Easting",4321000.0],PARAMETER["False_Northing",3210000.0],PARAMETER["Central_Meridian",10.0],PARAMETER["Latitude_Of_Origin",52.0],UNIT["Meter",1.0]]',
        'WGS84':
        'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.0174532925199433]]',
        'GISCO':
        'PROJCS["PCS_Lambert_Azimuthal_Equal_Area",GEOGCS["GCS_User_Defined",DATUM["D_User_Defined",SPHEROID["User_Defined_Spheroid",6378388.0,0.0]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Azimuthal_Equal_Area"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",9.0],PARAMETER["Latitude_Of_Origin",48.0],UNIT["Meter",1.0]]',
    }

    def __init__(self, filename, nc_metadata, pcr_metadata, mapstack=True):
        self.name = '{}.nc'.format(
            filename) if not filename.endswith('.nc') else filename
        self.nc_metadata = nc_metadata
        self.hour = self.nc_metadata.get('time', {}).get('hour', '00')

        self.pcr_metadata = pcr_metadata
        self.is_mapstack = mapstack
        self.time, self.variable = self._init_dataset()

        self.hour_timestep = float(self.hour) / 24
        self.values = []
        self.timesteps = []
        self.current_count = 0
        self.current_idx1 = 0
        self.current_idx2 = 0

    def _init_dataset(self):
        # frmt = 'normal' if np.issubdtype(self.pcr_metadata['dtype'], np.unsignedinteger) else 'classic'
        self.frmt = self.nc_metadata.get('format', 'NETCDF4')
        self.nf = Dataset(self.name, 'w', format=self.frmt)
        self.nf.history = 'Created {}'.format(time.ctime(time.time()))
        self.nf.Conventions = 'CF-1.7'
        self.nf.Source_Software = 'JRC.E1 lisfloodutilities nexus wefe - pcr2nc'
        self.nf.source = self.nc_metadata.get('source')
        self.nf.reference = self.nc_metadata.get('reference')

        # Dimensions
        if self.is_mapstack:
            self.nf.createDimension('time', None)
        self.nf.createDimension('yc', self.pcr_metadata['rows'])
        self.nf.createDimension('xc', self.pcr_metadata['cols'])

        # define coordinates variables by calling one of the define_* functions
        datum_function = 'define_{}'.format(
            self.nc_metadata['geographical']['datum'].lower())
        post_datum_function = '{}_post'.format(datum_function)
        getattr(self, datum_function)()

        time_nc = None
        vardimensions = ('yc', 'xc')
        if self.is_mapstack:
            # time variable
            time_units = self.nc_metadata['time'].get('units', '')
            time_nc = self.nf.createVariable('time', 'f8', ('time', ))
            time_nc.standard_name = 'time'
            if str(self.hour) != '24':
                time_nc.units = '{} {}:00'.format(time_units,
                                                  str(self.hour).zfill(2))
            else:
                # observation is at 24h...need to rotate one day more
                start_date = datetime.datetime.strptime(
                    time_units[-10:], '%Y-%m-%d')  # 'days since 1996-01-01'
                start_date = start_date + datetime.timedelta(days=1)
                time_nc.units = 'days since {} 00:00'.format(
                    start_date.strftime('%Y-%m-%d'))
            time_nc.calendar = self.nc_metadata['time'].get(
                'calendar', 'proleptic_gregorian')
            vardimensions = ('time', 'yc', 'xc')

        # data variable
        complevel = self.nc_metadata['variable'].get('compression')
        additional_args = {'zlib': bool(complevel)}
        if complevel:
            print('Applying compression level', str(complevel))
            additional_args['complevel'] = complevel
        if np.issubdtype(self.pcr_metadata['dtype'], np.floating):
            additional_args['least_significant_digit'] = self.nc_metadata.get(
                'least_significant_digit', None)

        values_nc = self.nf.createVariable(
            self.nc_metadata['variable'].get('shortname', ''),
            self.pcr_metadata['dtype'], vardimensions, **additional_args)
        getattr(self, post_datum_function)(values_nc)

        values_nc.standard_name = self.nc_metadata['variable'].get(
            'shortname', '')
        values_nc.long_name = self.nc_metadata['variable'].get('longname', '')
        values_nc.units = self.nc_metadata['variable'].get('units', '')
        return time_nc, values_nc

    def add_to_stack(self, pcr_map, time_step=None):
        """
        Add a PCRaster map to the NetCDF4 file.
        :param time_step: int, it's basically the extension of pcraster map file
            For single files (ie not time series) time_step is None
        :param pcr_map: PCRasterMap object
        """
        print('Adding', pcr_map.filename, 'timestep', str(time_step), 'hour',
              self.hour_timestep)
        values = pcr_map.data
        if not np.issubdtype(values.dtype, np.integer):
            values[values == pcr_map.mv] = np.nan
        self.values.append(values)
        if time_step:
            self.timesteps.append(float(time_step))
        self.current_count += 1

        if self.current_count == 20:
            self.current_idx2 += self.current_count
            print('Writing a chunk...')
            dtype = self.values[0].dtype
            if self.is_mapstack:
                self.variable[
                    self.current_idx1:self.current_idx2, :, :] = np.array(
                        self.values, dtype=dtype)
            else:
                self.variable[:, :] = np.array(self.values, dtype=dtype)
            # update slicing indexes
            self.current_idx1 = self.current_idx2
            # reset
            self.values = []
            self.current_count = 0

    def finalize(self):
        """
        Write last maps to the stack and close the NetCDF4 dataset.
        """
        print('Writing...', self.name)
        if self.is_mapstack:
            print('Writing time dimension...', self.timesteps[:4], '...',
                  self.timesteps[-4:])
            self.time[:] = np.array(self.timesteps, dtype=np.float64)

        if self.values:
            dtype = self.values[0].dtype
            if self.is_mapstack:
                self.current_idx2 += self.current_count
                self.variable[
                    self.current_idx1:self.current_idx2, :, :] = np.array(
                        self.values, dtype=dtype)
            else:
                self.variable[:, :] = np.array(self.values, dtype=dtype)
        self.nf.close()

    def define_wgs84(self):
        """
        Define WGS84 reference system
        """
        # coordinates variables
        print('Defining WGS84 coordinates variables')
        longitude = self.nf.createVariable('lon', 'f8', ('xc', ))
        longitude.standard_name = 'longitude'
        longitude.long_name = 'longitude coordinate'
        longitude.units = 'degrees_east'

        latitude = self.nf.createVariable('lat', 'f8', ('yc', ))
        latitude.standard_name = 'latitude'
        latitude.long_name = 'latitude coordinate'
        latitude.units = 'degrees_north'
        longitude[:] = self.pcr_metadata['lons']
        latitude[:] = self.pcr_metadata['lats']

    def define_wgs84_post(self, values_var):
        values_var.coordinates = 'lon lat'
        values_var.esri_pe_string = self.DATUM.get(
            self.nc_metadata['geographical'].get('datum', 'WGS84').upper(), '')

    def define_etrs89(self):
        """
        Define a ETRS89 reference system
        """
        print('Defining ETRS89 coordinates variables')
        # Variables
        x = self.nf.createVariable('x', 'f8', ('xc', ))
        y = self.nf.createVariable('y', 'f8', ('yc', ))
        x.standard_name = 'projection_x_coordinate'
        x.long_name = 'x coordinate of projection'
        x.units = 'Meter'

        y.standard_name = 'projection_y_coordinate'
        y.long_name = 'y coordinate of projection'
        y.units = 'Meter'
        x[:] = self.pcr_metadata['lons']
        y[:] = self.pcr_metadata['lats']

        proj = self.nf.createVariable('laea', 'i4')
        proj.grid_mapping_name = 'lambert_azimuthal_equal_area'
        proj.false_easting = 4321000.0
        proj.false_northing = 3210000.0
        proj.longitude_of_projection_origin = 10.0
        proj.latitude_of_projection_origin = 52.0
        proj.semi_major_axis = 6378137.0
        proj.inverse_flattening = 298.257222101  # GRS 1980, consistent with the ETRS89 datum string above
        proj.proj4_params = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs"
        proj.EPSG_code = "EPSG:3035"

    def define_etrs89_post(self, values_var):
        values_var.coordinates = 'x y'
        values_var.grid_mapping = 'lambert_azimuthal_equal_area'
        values_var.esri_pe_string = self.DATUM.get(
            self.nc_metadata['geographical'].get('datum', 'WGS84').upper(), '')

    def define_gisco(self):
        """
        It defines a custom LAEA ETRS89 GISCO reference system
        """
        print('Defining GISCO coordinates variables')
        # Variables
        x = self.nf.createVariable('x', 'f8', ('xc', ))
        y = self.nf.createVariable('y', 'f8', ('yc', ))
        x.standard_name = 'projection_x_coordinate'
        x.long_name = 'x coordinate of projection'
        x.units = 'Meter'

        y.standard_name = 'projection_y_coordinate'
        y.long_name = 'y coordinate of projection'
        y.units = 'Meter'
        x[:] = self.pcr_metadata['lons']
        y[:] = self.pcr_metadata['lats']

        proj = self.nf.createVariable('laea', 'i4')
        proj.grid_mapping_name = 'lambert_azimuthal_equal_area'
        proj.false_easting = 0.0
        proj.false_northing = 0.0
        proj.longitude_of_projection_origin = 9.0
        proj.latitude_of_projection_origin = 48.0
        proj.semi_major_axis = 6378388.0
        proj.inverse_flattening = 0.0
        proj.proj4_params = "+proj=laea +lat_0=48 +lon_0=9 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs"
        proj.EPSG_code = "EPSG:3035"

    def define_gisco_post(self, values_var):
        values_var.coordinates = 'x y'
        values_var.grid_mapping = 'lambert_azimuthal_equal_area'
        values_var.esri_pe_string = self.DATUM.get(
            self.nc_metadata['geographical'].get('datum', 'WGS84').upper(), '')
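
A hedged usage sketch for the writer above: the metadata keys mirror the ones the class actually reads, while the values, grid, and the toy map object standing in for a PCRasterMap are purely illustrative.

import numpy as np

class _ToyMap:
    """Stand-in for a PCRasterMap object; only the attributes add_to_stack uses."""
    filename = 'map0000.001'
    mv = -9999.0
    data = np.random.rand(10, 20).astype('f4')

nc_metadata = {
    'format': 'NETCDF4',
    'source': 'example run',
    'reference': 'example reference',
    'geographical': {'datum': 'WGS84'},
    'time': {'units': 'days since 1996-01-01', 'calendar': 'proleptic_gregorian', 'hour': '00'},
    'variable': {'shortname': 'dis', 'longname': 'discharge', 'units': 'm3 s-1', 'compression': 4},
}
pcr_metadata = {
    'rows': 10, 'cols': 20, 'dtype': 'f4',
    'lats': np.linspace(50.0, 49.1, 10), 'lons': np.linspace(4.0, 5.9, 20),
}

writer = NetCDFWriter('example_output', nc_metadata, pcr_metadata, mapstack=True)
for step in range(1, 4):
    writer.add_to_stack(_ToyMap(), time_step=step)
writer.finalize()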
Example #60
0
def write_lba_obs_netcdf(obs_output, site, outfilename):
    """
    Takes a pandas dataframe with the obs data a writes it to netcdf, for a given site
    """

    print("Writing LBA OBS output to netcdf with relevant metadata/attributes")

    data = Dataset(outfilename, 'w', format='NETCDF4')

    lats = [sitelocs[site][0]]
    lons = [sitelocs[site][1]]

    latdim = data.createDimension('lat', len(lats))
    londim = data.createDimension('lon', len(lons))
    tdim = data.createDimension('time', None)  # record, or unlimited dimension

    # variables
    latitudes = data.createVariable('lat', 'f4', ('lat', ))
    longitudes = data.createVariable('lon', 'f4', ('lon', ))
    time = data.createVariable('time', np.float64, ('time', ))

    latitudes.units = 'degrees_north'
    longitudes.units = 'degrees_east'

    latitudes.title = 'Latitude'
    longitudes.title = 'Longitude'

    latitudes.actual_min = min(lats)
    latitudes.actual_max = max(lats)

    longitudes.actual_min = min(lons)
    longitudes.actual_max = max(lons)

    latitudes[:] = lats
    longitudes[:] = lons

    print("Creating metadata")

    # add values to time variable

    # For lba this is daily! (Not hourly)
    #times = [datetime.datetime.combine(site_dates[site][0], datetime.time()) + datetime.timedelta(hours=hour) for hour in range(site_dates[site][1]*24)]

    #times = [datetime.date(site_dates[site][0] + datetime.timedelta(days=day) for day in range(site_dates[site][1])]

    # For daily timeseries, you can do this:
    times = [
        datetime.datetime.combine(site_dates_lba[site][0], datetime.time()) +
        datetime.timedelta(days=day) for day in range(site_dates_lba[site][1])
    ]
    #times = [dt.datetime.combine(dates[i], dt.time()) for i, date in enumerate(dates)]

    time.units = 'days since 1850-01-01 00:00:00.0'  # CHECK THIS MATCHES WHAT IS IN YOUR INPUT DATA!
    time.calendar = 'gregorian'
    time[:] = date2num(times, time.units, calendar=time.calendar)

    print("Writing data to file for " + str(len(lats)) + " point(s)")

    for in_var in lba_obs_in_vars:
        print("Creating netcdf variable: " + in_var)
        dataout = data.createVariable(in_var,
                                      'f4', (
                                          'time',
                                          'lat',
                                          'lon',
                                      ),
                                      fill_value=-9999.)

        # EDIT THIS, add metadata for other variables here
        if in_var == "GEP_model":  # GPP_GB alias
            dataout.units = "g.m-2.d-1"
            dataout.long_name = "Gridbox GPP"
            dataout.title = "GPP_GB"
        elif in_var == "NEE_model":
            dataout.units = "g.m-2.d-1"
            dataout.long_name = "Gridbox NEE"
        else:
            print("No data variables to write...? \n Check your netcdf file")

        print("Writing " + in_var + " data to file...")
        print(obs_output[in_var])
        dataout[:, 0, 0] = obs_output[in_var].values

        # min/max values
        dataout.actual_min = np.min(dataout[:, 0, 0])
        dataout.actual_max = np.max(dataout[:, 0, 0])

    data.close()
    print("***SUCCESS writing to file***")
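
The function above also relies on module-level globals (sitelocs, site_dates_lba, lba_obs_in_vars); a hedged sketch of the kind of daily obs_output DataFrame it expects, with illustrative values and a commented-out call showing the assumed globals:

import numpy as np
import pandas as pd

n_days = 30                                              # illustrative record length
obs_output = pd.DataFrame({
    'GEP_model': np.random.uniform(0.0, 10.0, n_days),   # g m-2 d-1, illustrative
    'NEE_model': np.random.uniform(-5.0, 5.0, n_days),   # g m-2 d-1, illustrative
})
# write_lba_obs_netcdf(obs_output, 'example_site', 'lba_obs_example_site.nc')
# would additionally need, e.g.:
#   sitelocs['example_site'] = (-2.857, -54.959)             # (lat, lon), illustrative
#   site_dates_lba['example_site'] = (start_date, n_days)    # start date + record length
#   lba_obs_in_vars = ['GEP_model', 'NEE_model']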