Example #1
def main():
    crs = ccrs.PlateCarree(central_longitude=0)
    fname = ('../DTU/DTU10MDT_2min.nc')
    dataset = netcdf_dataset(fname)

    # print(dataset.variables)
    mdt = dataset.variables['mdt'][:]
    # mdt = bound_arr(mdt, -2, 2)
    lats = dataset.variables['lat'][:]
    lons = dataset.variables['lon'][:]
    print(mdt.shape)
    print(lats, lons)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=crs)
    # ax = plt.axes(projection=crs)

    # mdt = mdt + mask

    vmin = np.min(mdt)
    vmax = np.max(mdt)
    print(vmin, vmax)

    # plt.pcolormesh(lons, lats, mdt, transform=crs)
    im = ax.pcolormesh(lons,
                       lats,
                       mdt,
                       transform=crs,
                       cmap='turbo',
                       vmin=vmin,
                       vmax=vmax)

    ax.set_xticks([-180, -135, -90, -45, 0, 45, 90, 135, 180], crs=crs)
    ax.set_yticks([-90, -60, -30, 0, 30, 60, 90], crs=crs)
    lon_formatter = LongitudeFormatter(zero_direction_label=True)
    lat_formatter = LatitudeFormatter()

    # Following axes formatters cannot be used with non-rectangular projections
    ax.xaxis.set_major_formatter(lon_formatter)
    ax.yaxis.set_major_formatter(lat_formatter)
    ax.tick_params(labelsize=20)  # tick label font size
    # ax.add_feature(cfeature.LAND, resolution='10m')
    # ax.coastlines()

    # ax.gridlines()
    # ax.set_extent([100, 160, 0, 60])
    # ax.set_aspect('auto', adjustable=None)

    # cbar_arr = (np.linspace(vmin, vmax, 5, dtype=int))

    # Produce a colorbar corresponding to each individual plot:
    fig.colorbar(im,
                 ax=ax,
                 fraction=0.0235,
                 pad=0.04,
                 ticks=[-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2],
                 format=ticker.FuncFormatter(cbar_fmt))
    plt.title('DTU10 Global Mean Dynamic Topography: 2 min resolution',
              fontsize=21)
    plt.show()
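
The snippet above assumes imports along the following lines; `cbar_fmt` and `bound_arr` are project helpers that are not shown, so the placeholder `cbar_fmt` below is only an assumption about what the colorbar formatter does.

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from netCDF4 import Dataset as netcdf_dataset

def cbar_fmt(x, pos):
    # hypothetical colorbar tick formatter matching ticker.FuncFormatter's (value, position) signature
    return '{:.1f} m'.format(x)
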
Example #2
def request_netcdf_dataset(request_url, nc_filename):
    req = Request(request_url)

    try:
        response = urlopen(req, timeout=70)
    except HTTPError as he:
        # Update config file to show that the dataset is not valid 
        print("Could not access data from ERDDAP")
        print(he.reason)
    except URLError as e:
        print('  * Failed to reach erddap server. Stopping.')
        print('  * reason: ', e.reason)
    except Exception:
        print('generic exception: ' + traceback.format_exc())
        print("General Error (likely timeout)")
    else:
        print('  * data file returned from erddap')
        CHUNK = 16 *1024
        with open(nc_filename,'wb') as f:
            while True:
                chunk = response.read(CHUNK)
                if not chunk:
                    break
                f.write(chunk)
        try:
            dataset = netcdf_dataset(nc_filename)
        except Exception:
            print("Not valid netcdf returned")
        else:
            return dataset
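
A minimal usage sketch, assuming the imports below and a hypothetical ERDDAP griddap URL (both the URL and the output filename are placeholders):

import traceback
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
from netCDF4 import Dataset as netcdf_dataset

url = 'https://coastwatch.pfeg.noaa.gov/erddap/griddap/someDatasetID.nc?someVariable'  # placeholder
ds = request_netcdf_dataset(url, 'downloaded.nc')
if ds is not None:
    print(list(ds.variables))
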
Example #3
def getPointDF(dirname, filename, in_lat, in_lon, make_csv=True):
    ##open netcdf
    nc = netcdf_dataset(os.path.join('.', dirname, filename))
    ##pick a point
    lats = nc.variables['lat'][:]
    lons = nc.variables['lon'][:]

    lat_idx = geo_idx(in_lat, lats)
    lon_idx = geo_idx(in_lon, lons)
    ##pull out variables at that point (lev1 only) for each variable (pre-defined list)
    vardf = pd.DataFrame(index=variable_names, columns=month_names)
    #dict/df: key=varname, value=12x1 array retrieved
    ##pull out 1 layer of variable to add to df -- internal loop
    for varname in variable_names:
        var = nc[varname]
        if (len(var.shape) == 3):  ##2d var
            vardf.loc[varname, :] = var[:, lat_idx, lon_idx]
        elif (len(var.shape) == 4):  ##3d var
            vardf.loc[varname, :] = var[:, 0, lat_idx, lon_idx]
    ##write to csv with newfilename
    if (make_csv):
        newfilename = os.path.join(
            '.', dirname,
            filename.split(".")[0] + '_' + str(in_lat) + '_' + str(in_lon) +
            ".csv")
        vardf.to_csv(newfilename)
    return (vardf)
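
`geo_idx` is not included in this example; a minimal sketch, assuming it returns the index of the grid coordinate nearest the requested latitude or longitude:

import numpy as np

def geo_idx(value, grid_coords):
    # index of the grid point closest to `value` (assumed behaviour of the missing helper)
    return int(np.abs(np.asarray(grid_coords) - value).argmin())
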
Example #4
def getData(dataset_id, parameter, bounds, time, sub, mapImageDir):

    lat1 = bounds[0]
    lat2 = bounds[1]
    lon1 = bounds[2]
    lon2 = bounds[3]
    lat_order = bounds[4]
    altf = bounds[5]
    time1 = time  # using just one timestamp for this example
    time2 = time

    print(lat1)
    print(lat2)
    print(lon1)
    print(lon2)
    print(sub)

    print(time1)
    print(time2)

    if altf == 1:
        #write query with altitude, haven't tested this
        alt1 = float(bounds[6])
        alt2 = float(bounds[7])
        altsub = '1'
        print(alt1)
        print(alt2)
        print(altsub)
        query = parameter + '[(%s):%s:(%s)][(%f):%s:(%f)][(%f):%s:(%f)][(%f):%s:(%f)]' % (
            time1, sub, time2, alt1, altsub, alt2, lat1, sub, lat2, lon1, sub,
            lon2)

    else:
        #write query without altitude
        query = parameter + '[(%s):%s:(%s)][(%f):%s:(%f)][(%f):%s:(%f)]' % (
            time1, sub, time2, lat1, sub, lat2, lon1, sub, lon2)

    base_url = 'http://polarwatch.noaa.gov/erddap/griddap/' + dataset_id + '.nc?'
    url = base_url + query
    print(url)

    file = mapImageDir + '/' + parameter + '.nc'
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                               ca_certs=certifi.where())
    r = http.request('GET', url, preload_content=False)
    with open(file, 'wb') as out:
        while True:
            data = r.read(1024 * 1024)
            if not data:
                break
            out.write(data)
    r.release_conn()

    # add a real check here
    print('Satellite Data File Retrieved')

    data_netcdf_dataset = netcdf_dataset(file)
    dataset = {"data": data_netcdf_dataset, "url": url}
    return dataset
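
For illustration, the no-altitude branch builds a standard ERDDAP griddap query of the form `var[(time1):stride:(time2)][(lat1):stride:(lat2)][(lon1):stride:(lon2)]`; with hypothetical values it expands like this:

# hypothetical values, for illustration only
parameter, sub = 'analysed_sst', '1'
time1 = time2 = '2021-06-01T00:00:00Z'
lat1, lat2, lon1, lon2 = 60.0, 75.0, -180.0, -140.0
query = parameter + '[(%s):%s:(%s)][(%f):%s:(%f)][(%f):%s:(%f)]' % (
    time1, sub, time2, lat1, sub, lat2, lon1, sub, lon2)
# analysed_sst[(2021-06-01T00:00:00Z):1:(2021-06-01T00:00:00Z)][(60.000000):1:(75.000000)][(-180.000000):1:(-140.000000)]
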
Example #5
def load_cls(fname, path=None, var='mdt'):
    if path is None:
        path = ""
    filepath = os.path.join(os.path.normpath(path), fname)
    dataset = netcdf_dataset(filepath)
    var = dataset.variables[var][0, :, :]
    lats = dataset.variables['latitude'][:]
    lons = dataset.variables['longitude'][:]
    return var, lats, lons
Example #6
 def __init__(self, base: str, peturbed: str):
     '''
     Arguments:
         base - the initial simulation path and filename
         peturbed - the perturbed simulation path and filename
         
         Both arguments require the full path to the netCDF file we wish to load.
     '''
     
     self.basename = base.split('/')[-1]
     self.base = netcdf_dataset(base)
     self.basevars = self.base.variables
     
     self.peturbedname = peturbed.split('/')[-1]
     self.peturbed = netcdf_dataset(peturbed)
     self.peturbedvars = self.peturbed.variables

     self.varboth = set(self.basevars) & set(self.peturbedvars)
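
A usage sketch with hypothetical paths; the enclosing class is not shown in the snippet, so the name `SimulationComparison` below is assumed:

comparison = SimulationComparison('/data/runs/base_run.nc', '/data/runs/perturbed_run.nc')  # hypothetical class name and paths
print(sorted(comparison.varboth))  # variables present in both simulations
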
Example #7
def load_dtu(fname, path=None, var='mdt'):
    if path is None:
        path = ""
    filepath = os.path.join(os.path.normpath(path), fname)
    dataset = netcdf_dataset(filepath)
    print(dataset.variables)
    var = dataset.variables[var][:]
    lats = dataset.variables['lat'][:]
    lons = dataset.variables['lon'][:]
    return var, lats, lons
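
A usage sketch for the two loaders (`load_cls` from Example #5 and `load_dtu` above), with hypothetical paths and filenames:

# hypothetical inputs; both loaders return (field, lats, lons)
mdt_dtu, lats_dtu, lons_dtu = load_dtu('DTU10MDT_2min.nc', path='../DTU')
mdt_cls, lats_cls, lons_cls = load_cls('cls_mdt.nc', path='../CLS')
print(mdt_dtu.shape, mdt_cls.shape)
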
Example #8
def getData(dId, parameter, bounds, time, sub):
    print(bounds)
    print(parameter)
    print(time)

    lat1 = bounds[0]
    lat2 = bounds[1]
    lon1 = bounds[2]
    lon2 = bounds[3]
    altf = bounds[4]

    time1 = time  # using just one timestamp for this example
    time2 = time

    if altf == 1:
        #write query with altitude, haven't tested this
        alt1 = bounds[5]
        alt2 = bounds[6]
        altsub = 1
        query = parameter + '[(%s):%s:(%s)][(%f):%s:(%f)][(%f):%s:(%f)][(%f):%s:(%f)]' % (
            time1, sub, time2, alt1, altsub, alt2, lat1, sub, lat2, lon1, sub,
            lon2)

    else:
        #write query without altitude
        query = parameter + '[(%s):%s:(%s)][(%f):%s:(%f)][(%f):%s:(%f)]' % (
            time1, sub, time2, lat1, sub, lat2, lon1, sub, lon2)

    base_url = 'http://polarwatch.noaa.gov/erddap/griddap/' + dId + '.nc?'
    url = base_url + query

    print(url)

    file = 'dataset.nc'

    http = urllib3.PoolManager()
    r = http.request('GET', url, preload_content=False)

    with open(file, 'wb') as out:
        while True:
            data = r.read(1024 * 1024)
            if not data:
                break
            out.write(data)

    r.release_conn()

    # add a real check here
    print('Satellite Data File Retrieved')

    datafile = netcdf_dataset(file)
    dataset = {"data": datafile, "url": url}
    return dataset
Example #9
def datareader(dp=[], d=[], v=[], *args):
    Dpath = []
    Dread = []
    for i in range(0, len(dp)):
        Dpath.append(dp[i] + d[i])
        if (d[i] == ''):
            continue
        temp = netcdf_dataset(Dpath[i])
        try:
            Dread.append(temp.variables[v[i]][0, :, :])
        except Exception:
            Dread.append(temp.variables[v[i]])
    try:
        Dread.append(temp.variables['lat'][:])
        Dread.append(temp.variables['lon'][:])
    except Exception:
        pass  # lat/lon not present in the last file read
    return Dread
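
A usage sketch for `datareader`, with hypothetical directories, filenames and variable names; the returned list holds one array per requested variable, followed by `lat` and `lon` when the last file read provides them:

# hypothetical inputs, for illustration only
paths = ['./run_a/', './run_b/']
files = ['mdt_a.nc', 'mdt_b.nc']
varnames = ['mdt', 'mdt']
mdt_a, mdt_b, lats, lons = datareader(paths, files, varnames)
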
Example #10
def read_dataset(name='', granule_name='', variable=None, path='/tmp'):
    d = netcdf_dataset(path, mode='r')
    dataset = d.variables[variable]

    # By convention, but not by standard, if the dimensions exist, they will be in the order:
    # time (t), altitude (z), latitude (y), longitude (x)
    # but conventions aren't always followed and all dimensions aren't always present so
    # see if we can make some educated deductions before defaulting to just pulling the first three
    # columns.
    temp_dimensions = list(map(lambda x: x.lower(), dataset.dimensions))
    dataset_dimensions = dataset.dimensions
    time = dataset_dimensions[temp_dimensions.index('time') if 'time' in
                              temp_dimensions else 0]
    lat = dataset_dimensions[temp_dimensions.index('lat') if 'lat' in
                             temp_dimensions else 1]
    lon = dataset_dimensions[temp_dimensions.index('lon') if 'lon' in
                             temp_dimensions else 2]

    # Time is given to us in some units since an epoch. We need to convert
    # these values to datetime objects. Note that we use the main object's
    # time object and not the dataset specific reference to it. We need to
    # grab the 'units' from it and it fails on the dataset specific object.
    times = np.array(convert_times_to_datetime(d[time]))
    lats = np.array(d.variables[lat][:])
    lons = np.array(d.variables[lon][:])
    values = np.array(dataset[:])
    origin = {'source': 'PO.DAAC', 'url': 'podaac.jpl.nasa.gov'}

    # Removing the downloaded temporary granule before creating the OCW
    # dataset.
    d.close()
    path = os.path.join(os.path.dirname(__file__), granule_name)
    os.remove(path)

    return Dataset(lats,
                   lons,
                   times,
                   values,
                   variable,
                   name=name,
                   origin=origin)
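
`convert_times_to_datetime` is not shown here; a minimal sketch, assuming the time variable carries a CF-style `units` attribute that `netCDF4.num2date` can decode:

from netCDF4 import num2date

def convert_times_to_datetime(time_var):
    # decode "units since epoch" values into datetime-like objects (assumed helper behaviour)
    return num2date(time_var[:], units=time_var.units)
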
Example #11
def read_dataset(name='', granule_name ='', variable=None, path='/tmp'):
    d = netcdf_dataset(path, mode='r')
    dataset = d.variables[variable]

    # By convention, but not by standard, if the dimensions exist, they will be in the order:
    # time (t), altitude (z), latitude (y), longitude (x)
    # but conventions aren't always followed and all dimensions aren't always present so
    # see if we can make some educated deductions before defaulting to just pulling the first three
    # columns.
    temp_dimensions = list(map(lambda x: x.lower(), dataset.dimensions))
    dataset_dimensions = dataset.dimensions
    time = dataset_dimensions[temp_dimensions.index(
        'time') if 'time' in temp_dimensions else 0]
    lat = dataset_dimensions[temp_dimensions.index(
        'lat') if 'lat' in temp_dimensions else 1]
    lon = dataset_dimensions[temp_dimensions.index(
        'lon') if 'lon' in temp_dimensions else 2]

    # Time is given to us in some units since an epoch. We need to convert
    # these values to datetime objects. Note that we use the main object's
    # time object and not the dataset specific reference to it. We need to
    # grab the 'units' from it and it fails on the dataset specific object.
    times = np.array(convert_times_to_datetime(d[time]))
    lats = np.array(d.variables[lat][:])
    lons = np.array(d.variables[lon][:])
    values = np.array(dataset[:])
    origin = {
        'source': 'NASA JPL PO.DAAC',
        'url': 'https://podaac.jpl.nasa.gov'
    }

    # Removing the downloaded temporary granule before creating the OCW
    # dataset.
    d.close()
    path = os.path.join(os.path.dirname(__file__), granule_name)
    os.remove(path)

    return Dataset(lats, lons, times, values, variable, name=name, origin=origin)
Example #12
def read_nc(filepath,filename):
    """
    Read satellite nc file

    :param string filepath: path to file
    :param string filename: name of file

    :return netcdf dataset: Array of sst over time and space
    :return list: Time indices
    :return list: Time labels

    """

    dataset = netcdf_dataset(filepath+filename,"r",format="NETCDF3_64BIT_DATA")
    time = dataset.variables['analysed_sst'][:, 0, 0]
    time = np.arange(len(time))  # one index per time step
    print(time)

    #################
    # Getting date labels to put in the images:
    #################
    time_label = dataset.variables['time'][:] #Time is in epoch time, need to convert it to human readable time
    #Use datetime to convert to human readable:
    time_list = []
    for x in time:
        lab = datetime.datetime.fromtimestamp(time_label[x]).strftime('%Y-%m-%d %H:%M:%S')
        #print(lab)
        time_list.append(lab)

    #Convert to data frame, separate date from time into different columns, keep date column only:
    time_label = DataFrame(time_list, columns=['date'])
    time_label['date'] = time_label['date'].str.split(r'\ ').str.get(0)

    return [dataset,time,time_label]
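
A usage sketch with a hypothetical path and filename; `read_nc` returns the open dataset, an index for each time step, and the corresponding date labels:

dataset, time_idx, time_labels = read_nc('./data/', 'blended_sst_daily.nc')  # hypothetical file
print(time_labels.head())
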
Example #13
def get_grid(filename):

    print(filename)

    nc = netcdf_dataset(filename)

    sat_height = nc.variables[
        'goes_imager_projection'].perspective_point_height
    radar_lon = -98.128
    radar_lat = 36.741

    _x = nc.variables['x'] * sat_height
    _y = nc.variables['y'] * sat_height
    _c = nc.variables['CMI_C13'][:] * -1
    data = nc.variables['CMI_C13']

    proj_var = nc.variables[data.grid_mapping]

    globe = ccrs.Globe(ellipse='sphere',
                       semimajor_axis=proj_var.semi_major_axis,
                       semiminor_axis=proj_var.semi_minor_axis)

    proj = ccrs.Geostationary(central_longitude=-75,
                              sweep_axis='x',
                              satellite_height=sat_height,
                              globe=globe)

    trans = ccrs.PlateCarree(central_longitude=0)

    transform_xy = trans.transform_points(proj, _x, _y)

    lim = [
        _nearest(transform_xy[:, 0], -103),
        _nearest(transform_xy[:, 0], -92),
        _nearest(transform_xy[:, 1], 42),
        _nearest(transform_xy[:, 1], 30)
    ]

    x = _x[lim[0]:lim[1]]
    y = _y[lim[2]:lim[3]]
    c = _c[lim[2]:lim[3], lim[0]:lim[1]]

    dx = x.max() - x.min()
    dy = y.max() - y.min()

    print('X: ' + str(dx))
    print('Y: ' + str(dy))

    #    x = _x
    #    y = _y
    #    c = _c

    x_mesh, y_mesh = np.meshgrid(x, y)

    print(proj)
    print(x_mesh)
    print(y_mesh)

    lonlat = trans.transform_points(proj, x_mesh, y_mesh)
    lons = lonlat[:, :, 0]
    lats = lonlat[:, :, 1]

    interp_c = interp_lonlat(lons, lats, c, radar_lon, radar_lat, grid_x,
                             grid_y)

    _time = {
        'calendar': 'gregorian',
        'data': np.array([0.934]),
        'long_name': 'Time of grid',
        'standard_name': 'time',
        'units': str('seconds since ' + nc.time_coverage_end)
    }

    #    _fields = {'reflectivity': {'_FillValue': -9999.0, 'data': np.ma.masked_array(c, mask= False),
    #                       'long_name': 'reflectivity',
    #                       'standard_name': 'equivalent_reflectivity_factor',
    #                       'units': 'dBZ', 'valid_max': c.max(), 'valid_min': c.min()}}

    _fields = {
        'c13': {
            '_FillValue': -9999.0,
            'data': interp_c,
            'long_name': 'channel 13 10.3 microns K',
            'standard_name': 'c13',
            'units': 'K',
            'valid_max': c.max(),
            'valid_min': c.min()
        }
    }

    _metadata = {
        'Conventions': '',
        'comment': '',
        'history': '',
        'institution': '',
        'instrument_name': '',
        'original_container': 'NEXRAD Level II',
        'references': '',
        'source': '',
        'title': '',
        'vcp_pattern': '',
        'version': ''
    }

    _origin_latitude = {
        'data': np.array([0]),
        'long_name': 'Latitude at grid origin',
        'standard_name': 'latitude',
        'units': 'degrees_north',
        'valid_max': 90.0,
        'valid_min': -90.0
    }

    #    _origin_longitude = {'data': np.ma.array([radar_lon]),
    #                        'long_name': 'Longitude at grid origin',
    #                        'standard_name': 'longitude', 'units': 'degrees_east',
    #                        'valid_max': 180.0, 'valid_min': -180.0}
    #
    #    _origin_altitude = {'data': np.ma.masked_array(np.array([0]), mask= False),
    #                       'long_name': 'Altitude at grid origin',
    #                       'standard_name': 'altitude', 'units': 'm'}

    _x = {
        'axis': 'X',
        'data': grid_x['data'],
        'long_name': 'X distance on the projection plane from the origin',
        'standard_name': 'projection_x_coordinate',
        'units': 'm'
    }

    _y = {
        'axis': 'Y',
        'data': grid_y['data'],
        'long_name': 'Y distance on the projection plane from the origin',
        'standard_name': 'projection_y_coordinate',
        'units': 'm'
    }

    _z = {
        'axis': 'Z',
        'data': np.array([0, mesh_size]),
        'long_name': 'Z distance on the projection plane from the origin',
        'positive': 'up',
        'standard_name': 'projection_z_coordinate',
        'units': 'm'
    }

    grid = pyart.core.grid.Grid(time=_time,
                                fields=_fields,
                                metadata=_metadata,
                                origin_latitude=_orig_lat,
                                origin_longitude=_orig_lon,
                                origin_altitude=_orig_alt,
                                x=_x,
                                y=_y,
                                z=_z,
                                projection=_projection,
                                radar_longitude=_orig_lon,
                                radar_latitude=_orig_lat,
                                radar_altitude=_orig_alt)

    grid_name = os.path.basename(filename[:-3] + '_grid.nc')
    full_name = os.path.join(out_dir, grid_name)
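
The excerpt builds `full_name` but stops before saving the grid; a plausible final step, assuming Py-ART's standard `pyart.io.write_grid` writer is the intended one:

pyart.io.write_grid(full_name, grid)  # write the assembled grid to a netCDF file (assumed final step)
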
Example #14
from mpl_toolkits.axes_grid1 import AxesGrid
import matplotlib.path as mpath
import matplotlib.colors as colors
# numpy
import numpy as np
# parameters
from get_parameters import *
# scipy
from scipy import stats

# data path
data_path="./precip_Arctic/"
file_name=data_path+"precip_statis_2x2.nc"

# read data
precp_file=netcdf_dataset(file_name,"r")
cnt_rn_cert=np.array(precp_file.variables["cnt_rain_cert"])
cnt_rn_poss=np.array(precp_file.variables["cnt_rain_poss"])
cnt_sn_cert=np.array(precp_file.variables["cnt_snow_cert"])
cnt_sn_poss=np.array(precp_file.variables["cnt_snow_poss"])
cnt_no_prec=np.array(precp_file.variables["cnt_noprecp"])
pre_rn_cert=np.array(precp_file.variables["pre_rain_cert"])
pre_rn_poss=np.array(precp_file.variables["pre_rain_poss"])
pre_sn_cert=np.array(precp_file.variables["pre_snow_cert"])
pre_sn_poss=np.array(precp_file.variables["pre_snow_poss"])
lat=np.array(precp_file.variables["lat"])
lon=np.array(precp_file.variables["lon"])
nlat=len(lat)
nlon=len(lon)

# calculate frequency and ratio
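
The excerpt ends at the "calculate frequency and ratio" step; a sketch of what that might look like, assuming frequency is the share of observations falling into each precipitation category:

# assumed definition: occurrence frequency = category count / total count per grid cell
cnt_total = cnt_rn_cert + cnt_rn_poss + cnt_sn_cert + cnt_sn_poss + cnt_no_prec
with np.errstate(invalid='ignore', divide='ignore'):
    freq_rain = (cnt_rn_cert + cnt_rn_poss) / cnt_total
    freq_snow = (cnt_sn_cert + cnt_sn_poss) / cnt_total
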
Example #15
means_yby_ctl_net_diag=np.zeros((years.size,2,nlat)) #year by year mean for each variable
means_yby_exp_net_diag=np.zeros((years.size,2,nlat)) #year by year mean for each variable
means_ctl_net_diag=np.zeros((2,nlat)) #multi-year mean for each variable
means_exp_net_diag=np.zeros((2,nlat)) #multi-year mean for each variable
diffs_net_diag=np.zeros((2,nlat)) #multi-year exp-ctl diff for each variable
gm_yby_ctl_net_diag=np.zeros((2,years.size)) #year by year mean for each variable
gm_yby_exp_net_diag=np.zeros((2,years.size)) #year by year mean for each variable

means_yby_exp_fice=np.zeros((years.size,nlat)) #year by year mean for each variable
means_exp_fice=np.zeros((nlat)) #multi-year mean for each variable

for iy in range(0,years.size): 
    # open data file
    fctl=fpath_ctl+ctl_pref+"_ANN_"+str(years[iy])+".nc"
    fexp=fpath_exp+exp_pref+"_ANN_"+str(years[iy])+".nc"
    file_ctl=netcdf_dataset(fctl,"r")
    file_exp=netcdf_dataset(fexp,"r")

    fctl_diag=fpath_ctl_diag+ctl_pref_diag+"_climo_ANN.nc"
    fexp_diag=fpath_exp_diag+exp_pref_diag+"_climo_ANN.nc"
    file_ctl_diag=netcdf_dataset(fctl_diag,"r")
    file_exp_diag=netcdf_dataset(fexp_diag,"r")

    # read lat and lon
    lat=file_ctl.variables["lat"]
    lon=file_ctl.variables["lon"]
 
    means_yby_exp_fice[iy,:]=means_yby_exp_fice[iy,:]+np.mean(file_exp.variables["ICEFRAC"][0,:,:],axis=1)   

    # read data and calculate mean/min/max
    for vn in varnms_vis_dn:
Example #16
def ocn_modelvsobs(config, field, streamMap=None, variableMap=None):

    """
    Plots a comparison of ACME/MPAS output to SST or MLD observations

    config is an instance of MpasAnalysisConfigParser containing configuration
    options.

    field is the name of the field to be analyzed (currently one of 'mld',
    'sst', or 'sss')

    If present, streamMap is a dictionary of MPAS-O stream names that map to
    their mpas_analysis counterparts.

    If present, variableMap is a dictionary of MPAS-O variable names that map
    to their mpas_analysis counterparts.

    Authors: Luke Van Roekel, Milena Veneziani, Xylar Asay-Davis
    Modified: 12/08/2016
    """

    # read parameters from config file
    indir = config.get('paths', 'archive_dir_ocn')

    streams_filename = config.get('input', 'ocean_streams_filename')
    streams = StreamsFile(streams_filename, streamsdir=indir)

    # get a list of timeSeriesStats output files from the streams file,
    # reading only those that are between the start and end dates
    startDate = config.get('time', 'climo_start_date')
    endDate = config.get('time', 'climo_end_date')
    streamName = streams.find_stream(streamMap['timeSeriesStats'])
    infiles = streams.readpath(streamName, startDate=startDate,
                               endDate=endDate)
    print('Reading files {} through {}'.format(infiles[0], infiles[-1]))

    plots_dir = config.get('paths', 'plots_dir')
    obsdir = config.get('paths', 'obs_' + field + 'dir')
    casename = config.get('case', 'casename')
    meshfile = config.get('data', 'mpas_meshfile')
    climo_yr1 = config.getint('time', 'climo_yr1')
    climo_yr2 = config.getint('time', 'climo_yr2')
    yr_offset = config.getint('time', 'yr_offset')

    outputTimes = config.getExpression(field + '_modelvsobs',
                                       'comparisonTimes')

    f = netcdf_dataset(meshfile, mode='r')
    lonCell = f.variables["lonCell"][:]
    latCell = f.variables["latCell"][:]

    varList = [field]

    if field == 'mld':

        selvals = None

        # Load MLD observational data
        obs_filename = "{}/holtetalley_mld_climatology.nc".format(obsdir)
        dsData = xr.open_mfdataset(obs_filename)

        # Increment month value to be consistent with the model output
        dsData.iMONTH.values += 1

        # Rename the time dimension to be consistent with the SST dataset
        dsData.rename({'month': 'calmonth'}, inplace=True)
        dsData.rename({'iMONTH': 'month'}, inplace=True)

        obsFieldName = 'mld_dt_mean'

        # Reorder dataset for consistency
        dsData = dsData.transpose('month', 'iLON', 'iLAT')

        # Set appropriate MLD figure labels
        obsTitleLabel = "Observations (HolteTalley density threshold MLD)"
        fileOutLabel = "mldHolteTalleyARGO"
        unitsLabel = 'm'

    elif field == 'sst':

        selvals = {'nVertLevels': 0}

        obs_filename = \
            "{}/MODEL.SST.HAD187001-198110.OI198111-201203.nc".format(obsdir)
        dsData = xr.open_mfdataset(obs_filename)
        # Select years for averaging (pre-industrial or present-day)
        # This seems fragile as definitions can change
        if yr_offset < 1900:
            time_start = datetime.datetime(1870, 1, 1)
            time_end = datetime.datetime(1900, 12, 31)
            preIndustrial_txt = "pre-industrial 1870-1900"
        else:
            time_start = datetime.datetime(1990, 1, 1)
            time_end = datetime.datetime(2011, 12, 31)
            preIndustrial_txt = "present-day 1990-2011"

        ds_tslice = dsData.sel(time=slice(time_start, time_end))
        monthly_clim_data = ds_tslice.groupby('time.month').mean('time')

        # Rename the observation data for code compactness
        dsData = monthly_clim_data.transpose('month', 'lon', 'lat')
        obsFieldName = 'SST'

        # Set appropriate figure labels for SST
        obsTitleLabel = \
            "Observations (Hadley/OI, {})".format(preIndustrial_txt)
        fileOutLabel = "sstHADOI"
        unitsLabel = r'$^o$C'

    elif field == 'sss':

        selvals = {'nVertLevels': 0}

        obs_filename = "{}/Aquarius_V3_SSS_Monthly.nc".format(obsdir)
        dsData = xr.open_mfdataset(obs_filename)

        time_start = datetime.datetime(2011, 8, 1)
        time_end = datetime.datetime(2014, 12, 31)

        ds_tslice = dsData.sel(time=slice(time_start, time_end))

        # The following line converts from DASK to numpy to suppress an odd
        # warning that doesn't influence the figure output
        ds_tslice.SSS.values

        monthly_clim_data = ds_tslice.groupby('time.month').mean('time')

        # Rename the observation data for code compactness
        dsData = monthly_clim_data.transpose('month', 'lon', 'lat')
        obsFieldName = 'SSS'

        # Set appropriate figure labels for SSS
        preIndustrial_txt = "2011-2014"

        obsTitleLabel = "Observations (Aquarius, {})".format(preIndustrial_txt)
        fileOutLabel = 'sssAquarius'
        unitsLabel = 'PSU'

    ds = xr.open_mfdataset(
        infiles,
        preprocess=lambda x: preprocess_mpas(x, yearoffset=yr_offset,
                                             timestr='Time',
                                             onlyvars=varList,
                                             selvals=selvals,
                                             varmap=variableMap))
    ds = remove_repeated_time_index(ds)

    time_start = datetime.datetime(yr_offset+climo_yr1, 1, 1)
    time_end = datetime.datetime(yr_offset+climo_yr2, 12, 31)
    ds_tslice = ds.sel(Time=slice(time_start, time_end))
    monthly_clim = ds_tslice.groupby('Time.month').mean('Time')

    latData, lonData = np.meshgrid(dsData.lat.values, dsData.lon.values)
    latData = latData.flatten()
    lonData = lonData.flatten()

    daysarray = np.ones((12, dsData[obsFieldName].values.shape[1],
                         dsData[obsFieldName].values.shape[2]))

    for i, dval in enumerate(constants.dinmonth):
        daysarray[i, :, :] = dval
        inds = np.where(np.isnan(dsData[obsFieldName][i, :, :].values))
        daysarray[i, inds[0], inds[1]] = np.NaN

    # initialize interpolation variables
    d2, inds2, lonTarg, latTarg = init_tree(np.rad2deg(lonCell),
                                            np.rad2deg(latCell),
                                            constants.lonmin,
                                            constants.lonmax,
                                            constants.latmin,
                                            constants.latmax,
                                            constants.dLongitude,
                                            constants.dLatitude)
    d, inds, lonTargD, latTargD = init_tree(lonData, latData,
                                            constants.lonmin,
                                            constants.lonmax,
                                            constants.latmin,
                                            constants.latmax,
                                            constants.dLongitude,
                                            constants.dLatitude)
    nLon = lonTarg.shape[0]
    nLat = lonTarg.shape[1]

    modelOutput = np.zeros((len(outputTimes), nLon, nLat))
    observations = np.zeros((len(outputTimes), nLon, nLat))
    bias = np.zeros((len(outputTimes), nLon, nLat))

    # Interpolate and compute biases
    for i, timestring in enumerate(outputTimes):
        monthsvalue = constants.monthdictionary[timestring]

        if isinstance(monthsvalue, (int, np.integer)):
            modeldata = monthly_clim.sel(month=monthsvalue)[field].values
            obsdata = dsData.sel(month=monthsvalue)[obsFieldName].values
        else:

            modeldata = (np.sum(
                constants.dinmonth[monthsvalue-1] *
                monthly_clim.sel(month=monthsvalue)[field].values.T, axis=1) /
                np.sum(constants.dinmonth[monthsvalue-1]))
            obsdata = (np.nansum(
                daysarray[monthsvalue-1, :, :] *
                dsData.sel(month=monthsvalue)[obsFieldName].values, axis=0) /
                np.nansum(daysarray[monthsvalue-1, :, :], axis=0))

        modelOutput[i, :, :] = interp_fields(modeldata, d2, inds2, lonTarg)
        observations[i, :, :] = interp_fields(obsdata.flatten(), d, inds,
                                              lonTargD)

    for i in range(len(outputTimes)):
        bias[i, :, :] = modelOutput[i, :, :] - observations[i, :, :]

    clevsModelObs = config.getExpression(field + '_modelvsobs',
                                         'clevsModelObs')
    cmap = plt.get_cmap(config.get(field + '_modelvsobs',
                                   'cmapModelObs'))
    cmapIndices = config.getExpression(field + '_modelvsobs',
                                       'cmapIndicesModelObs')
    cmapModelObs = cols.ListedColormap(cmap(cmapIndices), "cmapModelObs")
    clevsDiff = config.getExpression(field + '_modelvsobs',
                                     'clevsDiff')
    cmap = plt.get_cmap(config.get(field + '_modelvsobs', 'cmapDiff'))
    cmapIndices = config.getExpression(field + '_modelvsobs',
                                       'cmapIndicesDiff')
    cmapDiff = cols.ListedColormap(cmap(cmapIndices), "cmapDiff")

    for i in range(len(outputTimes)):
        fileout = "{}/{}_{}_{}_years{:04d}-{:04d}.png".format(
            plots_dir, fileOutLabel, casename, outputTimes[i], climo_yr1,
            climo_yr2)
        title = "{} ({}, years {:04d}-{:04d})".format(
            field.upper(), outputTimes[i], climo_yr1, climo_yr2)
        plot_global_comparison(config,
                               lonTarg,
                               latTarg,
                               modelOutput[i, :, :],
                               observations[i, :, :],
                               bias[i, :, :],
                               cmapModelObs,
                               clevsModelObs,
                               cmapDiff,
                               clevsDiff,
                               fileout=fileout,
                               title=title,
                               modelTitle="{}".format(casename),
                               obsTitle=obsTitleLabel,
                               diffTitle="Model-Observations",
                               cbarlabel=unitsLabel)
Example #17
def load_level4_granule(variable, datasetId='', name=''):
    '''Loads a Level4 gridded Dataset from PODAAC
    :param variable: The name of the variable to read from the dataset.
    :type variable: :mod:`string`

    :param datasetId: dataset persistent ID. datasetId or \
        shortName is required for a granule search. Example: \
        PODAAC-ASOP2-25X01
    :type datasetId: :mod:`string`

    :param shortName: the shorter name for a dataset. \
        Either shortName or datasetId is required for a \
        granule search. Example: ASCATA-L2-25km
    :type shortName: :mod:`string`

    :param name: (Optional) A name for the loaded dataset.
    :type name: :mod:`string`

    :returns: A :class:`dataset.Dataset` containing the dataset pointed to by
        the OpenDAP URL.

    :raises: ServerError
    '''
    # Downloading the dataset using podaac toolkit
    podaac = Podaac()
    path = os.path.dirname(os.path.abspath(__file__))
    granuleName = podaac.extract_l4_granule(
        dataset_id=datasetId, path=path)
    path = path + '/' + granuleName
    d = netcdf_dataset(path, mode='r')
    dataset = d.variables[variable]

    # By convention, but not by standard, if the dimensions exist, they will be in the order:
    # time (t), altitude (z), latitude (y), longitude (x)
    # but conventions aren't always followed and all dimensions aren't always present so
    # see if we can make some educated deductions before defaulting to just pulling the first three
    # columns.
    temp_dimensions = list(map(lambda x: x.lower(), dataset.dimensions))
    dataset_dimensions = dataset.dimensions
    time = dataset_dimensions[temp_dimensions.index(
        'time') if 'time' in temp_dimensions else 0]
    lat = dataset_dimensions[temp_dimensions.index(
        'lat') if 'lat' in temp_dimensions else 1]
    lon = dataset_dimensions[temp_dimensions.index(
        'lon') if 'lon' in temp_dimensions else 2]

    # Time is given to us in some units since an epoch. We need to convert
    # these values to datetime objects. Note that we use the main object's
    # time object and not the dataset specific reference to it. We need to
    # grab the 'units' from it and it fails on the dataset specific object.
    times = np.array(convert_times_to_datetime(d[time]))
    lats = np.array(d.variables[lat][:])
    lons = np.array(d.variables[lon][:])
    values = np.array(dataset[:])
    origin = {
        'source': 'PO.DAAC',
        'url': 'podaac.jpl.nasa.gov/ws'
    }

    # Removing the downloaded temporary granule before creating the OCW
    # dataset.
    d.close()
    path = os.path.join(os.path.dirname(__file__), granuleName)
    os.remove(path)

    return Dataset(lats, lons, times, values, variable, name=name, origin=origin)
Example #18
import numpy as np
# parameters
from get_parameters import get_area_mean_min_max

#def lon_lat_contour_model_vs_model(varnm,season,scale_ctl,scale_exp,table):
# data path
ctl_name = "CTL"  #os.environ["ctl_name"]
exp_name = "TSIS"  #os.environ["exp_name"]
fpath_ctl = '/raid00/xianwen/data/cesm211_solar_exp/solar_CTL_cesm211_ETEST-f19_g17-ens_mean_2010-2019/climo/'
fpath_exp = '/raid00/xianwen/data/cesm211_solar_exp/solar_TSIS_cesm211_ETEST-f19_g17-ens_mean_2010-2019/climo/'

f1 = fpath_ctl + "solar_CTL_cesm211_ETEST-f19_g17-ens_mean_2010-2019_climo_ANN.nc"
f2 = fpath_exp + "solar_TSIS_cesm211_ETEST-f19_g17-ens_mean_2010-2019_climo_ANN.nc"

# open data file
file_ctl = netcdf_dataset(f1, "r")
file_exp = netcdf_dataset(f2, "r")

# read lat and lon
lat = file_ctl.variables["lat"]
lon = file_ctl.variables["lon"]
lev = file_ctl.variables["lev"]
nlat = len(lat)
nlon = len(lon)
nlev = len(lev)
print(nlat, nlon, nlev)

#varnm="FSSDCLRS14"
varnm = "CLDLOW"  #
#varnm_off="FLUTC_OFF"  #offline computation
#units=r"W/m$^2$"
Example #19
means_yby_exp_sfc = np.zeros(
    (years.size, varnms_sfc.size))  #year by year mean for each variable
means_ctl_sfc = np.zeros((varnms_sfc.size))  #multi-year mean for each variable
means_exp_sfc = np.zeros((varnms_sfc.size))  #multi-year mean for each variable
diffs_sfc = np.zeros(
    (varnms_sfc.size))  #multi-year exp-ctl diff for each variable
pvals_sfc = np.zeros((varnms_sfc.size))  #pvalues of ttest

for iy in range(0, years.size):
    # open data file
    fctl = fpath_ctl + ctl_pref + "_ANN_" + str(years[iy]) + ".nc"
    fexp = fpath_exp + exp_pref + "_ANN_" + str(years[iy]) + ".nc"
    fctl_fssd = fpath_ctl_fssd + ctl_fssd_pref + "_climo_ANN.nc"
    fexp_fssd = fpath_exp_fssd + exp_fssd_pref + "_climo_ANN.nc"

    file_ctl = netcdf_dataset(fctl, "r")
    file_exp = netcdf_dataset(fexp, "r")
    file_ctl_fssd = netcdf_dataset(fctl_fssd, "r")
    file_exp_fssd = netcdf_dataset(fexp_fssd, "r")

    # read lat and lon
    lat = file_ctl.variables["lat"]
    lon = file_ctl.variables["lon"]

    # read data and calculate mean/min/max
    for iv in range(0, varnms_toa.size):
        dtctl_toa=file_ctl_fssd.variables[varnms_toa[iv]][:,:,:] - \
                  file_ctl.variables[varnms_sub_toa[iv]][:,:,:]
        dtexp_toa=file_exp_fssd.variables[varnms_toa[iv]][:,:,:] - \
                  file_exp.variables[varnms_sub_toa[iv]][:,:,:]
        dtctl_sfc=file_ctl.variables[varnms_sfc[iv]][:,:,:] - \
Example #20
from timeit import default_timer as timer
from datetime import date, datetime, timedelta
start = timer()

filelist = sorted(os.listdir('/home/scarani/Desktop/data/goes/001/'))
channel = 2

for i in range(1200, 1501, 1):

    print(i)

    file = str('/home/scarani/Desktop/data/goes/001/' + filelist[i])

    filename = os.path.join(os.path.dirname(ccrs.__file__), 'data', 'netcdf',
                            file)
    nc = netcdf_dataset(filename)

    sat_height = nc.variables[
        'goes_imager_projection'].perspective_point_height

    x = nc.variables['x'][:].data * sat_height
    y = nc.variables['y'][:].data * sat_height
    c = nc.variables['Rad'][:]
    data = nc.variables['Rad']
    satvar = nc.variables.keys()
    time = nc['t']

    proj_var = nc.variables[nc.variables['Rad'].grid_mapping]

    globe = ccrs.Globe(ellipse='sphere',
                       semimajor_axis=proj_var.semi_major_axis,
Example #21
from cartopy import config
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER

#################
#open and read the dataset, save an iterable range of times
#note to zinka: time iterable is fricken stupid as it stands
#################

#create filepath to save png files to
filepath = 'PNG_files/'
if not os.path.isdir(filepath):
    os.mkdir(filepath)

dataset = netcdf_dataset('noaacwBLENDEDsstDaily_8423_ee32_0effevan.nc')
time = dataset.variables['analysed_sst'][:, 0, 0]
time = np.arange(len(time))  # one index per time step
print(time)

#################
# Getting date labels to put in the images:
#################
time_label = dataset.variables[
    'time'][:]  #Time is in epoch time, need to convert it to human readable time
#Use datetime to convert to human readable:
time_list = []
for x in time:
    lab = datetime.datetime.fromtimestamp(
Example #22
import os
import matplotlib.pyplot as plt
from netCDF4 import Dataset as netcdf_dataset

import cartopy.crs as ccrs

DIRBIN = os.path.dirname(os.path.abspath(__file__))

infile = os.path.join(
        DIRBIN,
        "b.e11.BRCP85C5CNBDRD.f09_g16.021.cice.h.hi_nh.208101-210012.nc"
        )

dataset = netcdf_dataset(infile)
sst = dataset.variables['hi'][0, :, :]
lats = dataset.variables['TLAT'][:]
lons = dataset.variables['TLON'][:]

ax = plt.axes(projection=ccrs.PlateCarree())

plt.contourf(lons, lats, sst, 60,
             transform=ccrs.PlateCarree())

ax.coastlines()

plt.show()
Example #23
import numpy as np
# parameters
from get_parameters import get_area_mean_min_max

#def lon_lat_contour_model_vs_model(varnm,season,scale_ctl,scale_exp,table):
# data path
ctl_name = "CTL"  #os.environ["ctl_name"]
exp_name = "TSIS"  #os.environ["exp_name"]
fpath_ctl = '/Volumes/WD4T_1/cesm2_solar_exp/solar_CTL_cesm211_ETEST-f19_g17-ens_mean_2010-2019/climo/'
#fpath_exp='/Volumes/WD4T_1/cesm2_solar_exp/solar_TSIS_cesm211_ETEST-f19_g17-ens_mean_2010-2019/climo/'

f1 = fpath_ctl + "solar_CTL_cesm211_ETEST-f19_g17-ens_mean_2010-2019_climo_ANN.nc"
#f2=fpath_exp+"solar_TSIS_cesm211_ETEST-f19_g17-ens_mean_2010-2019_climo_ANN.nc"

# open data file
file_ctl = netcdf_dataset(f1, "r")
#file_exp=netcdf_dataset(f2,"r")

# read lat and lon
lat = file_ctl.variables["lat"]
lon = file_ctl.variables["lon"]
lev = file_ctl.variables["lev"]

nlat = len(lat)
nlon = len(lon)
nlev = len(lev)

#varnm="FSSDCLRS14"
varnm = "FSSUS10"  # FSSUS10, FSSUS08
varnm2 = "FSSDS10"  # FSSDS10, FSSDS08
varnm3 = "FSSUS08"  # FSSUS10, FSSUS08
Example #24
    def plotsat():
        nc = netcdf_dataset(localfile.name)

        sat_height = nc.variables[
            'goes_imager_projection'].perspective_point_height

        x = nc.variables['x'][:].data * sat_height
        y = nc.variables['y'][:].data * sat_height
        c = nc.variables['CMI_C02'][:]
        data = nc.variables['CMI_C02']
        satvar = nc.variables.keys()
        time = nc['t']

        proj_var = nc.variables[nc.variables['CMI_C13'].grid_mapping]

        globe = ccrs.Globe(ellipse='sphere',
                           semimajor_axis=proj_var.semi_major_axis,
                           semiminor_axis=proj_var.semi_minor_axis)

        proj = ccrs.Geostationary(central_longitude=-75,
                                  sweep_axis='x',
                                  satellite_height=sat_height,
                                  globe=globe)

        north = y.max()
        south = y.min()
        east = x.max()
        west = x.min()

        x, y = np.meshgrid(x, y)
        fig = plt.figure(figsize=(15, 15))
        ax = fig.add_subplot(1, 1, 1, projection=proj)
        ax.set_xlim(west, east)
        ax.set_ylim(south, north)

        #    vmin = 190 #13
        #    vmax = 305 #13
        #    colormap = 'Greys' #13
        vmin = 0.03  #02
        vmax = 1.2  #02
        colormap = 'Greys_r'  #02

        im = ax.pcolormesh(x, y, c, cmap=colormap, vmin=vmin, vmax=vmax)
        ax.add_feature(cfeature.STATES, linewidth=2, edgecolor='black')
        ax.coastlines(resolution='10m', linewidth=1, edgecolor='black')
        ax.add_feature(cfeature.BORDERS, linewidth=1, edgecolor='black')

        cbar_ticks = np.arange(vmin, vmax,
                               round(((abs(vmax) - abs(vmin)) / 6), 2))
        cbar = fig.colorbar(im,
                            ticks=cbar_ticks,
                            orientation='horizontal',
                            shrink=0.6,
                            pad=.02)
        cbar.ax.set_yticklabels(str(cbar_ticks))

        cbar.set_label(data.units, rotation=0, labelpad=5, fontsize=13)

        # Figure Text
        txt = open(
            '/home/scarani/Desktop/precipitation-onset/GOES/channel_title.txt',
            "r")
        titles = txt.readlines()
        channel = data.ancillary_variables[-2:]
        ch = titles[int(channel)][:-1]
        #times = str(str(.time.values)[:-10]+ 'UTC')
        orbital_slot = nc.orbital_slot

        fmt = '%Y%m%d_%H%M%S'
        epoch = '20000101_120000'

        sd = datetime.strptime(epoch, fmt)
        times = sd + timedelta(seconds=int(time[0].data))
        time_title = times.strftime('%Y-%m-%dT%H:%M:%SUTC')
        savetime = times.strftime('%Y%m%d%H%M%S')

        sector = '***sector error***'
        if nc.dataset_name[15:-58] == 'M1':
            sector = 'Mesoscale-1'

        if nc.dataset_name[15:-58] == 'M2':
            sector = 'Mesoscale-2'

        if nc.dataset_name[15:-58] == 'F':
            sector = 'Disk'

        if nc.dataset_name[15:-58] == 'C':
            sector = 'CONUS'

        ax.set_title(orbital_slot + ': ' + sector + '\n' + ch + '\n' +
                     str(time_title),
                     fontsize=17)

        plt.savefig(savelocation + str(savetime) + '_' + str(scan) + '_' +
                    str(channel) + '.png',
                    bbox_inches='tight',
                    dpi=300)

        plt.show()
        plt.close()

        gc.collect()

        mid = timer()
        print(round((mid - start) / 60, 2))
Example #25
                                         'clevsDiff_conc_{}'.format(season))
        cmap = plt.get_cmap(config.get('seaice_modelvsobs', 'cmapDiff'))
        cmapIndices = config.getExpression('seaice_modelvsobs',
                                           'cmapIndicesDiff')
        cmapDiff = cols.ListedColormap(cmap(cmapIndices), "cmapDiff")

        lon0 = config.getfloat('seaice_modelvsobs',
                               'lon0_{}'.format(hemisphere))
        latmin = config.getfloat('seaice_modelvsobs',
                                 'latmin_{}'.format(hemisphere))

        # Load in sea-ice data
        #  Model...
        # ice concentrations
        fileName = "{}/{}".format(climodir_regridded, climofiles[climName])
        f = netcdf_dataset(fileName, mode='r')
        iceconc = f.variables["iceAreaCell"][:]
        if (first):
            lons = f.variables["lon"][:]
            lats = f.variables["lat"][:]
            print "Min lon: ", np.amin(lons), "Max lon: ", np.amax(lons)
            print "Min lat: ", np.amin(lats), "Max lat: ", np.amax(lats)
            Lons, Lats = np.meshgrid(lons, lats)
            first = False
        f.close()

        #  ...and observations
        # ice concentrations from NASATeam (or Bootstrap) algorithm
        for obsName in ['NASATeam', 'Bootstrap']:

            fileName = obs_iceconc_filenames['{}_{}'.format(climName, obsName)]
Example #26
def getData(dataset_id, parameter, qbounds, time):
    #print(qbounds)
    #print(parameter)
    #print(time)

    lat1 = qbounds[0]
    lat2 = qbounds[1]
    lon1 = qbounds[2]
    lon2 = qbounds[3]
    lat_order = qbounds[4]
    altf = qbounds[5]
    sub  = qbounds[6]
    
    time1 = time # using just one timestamp for this example
    time2 = time
    
    if altf == 1:
        #write query with altitude, haven't tested this
        alt1 = float(qbounds[7])
        alt2 = float(qbounds[8])
        altsub = 1
        query = parameter+'[(%s):%s:(%s)][(%f):%s:(%f)][(%f):%s:(%f)][(%f):%s:(%f)]' % (time1,sub,time2,alt1, altsub, alt2, lat1,sub,lat2,lon1,sub,lon2) 
    else:
        #write query without altitude
        query = parameter+'[(%s):%s:(%s)][(%f):%s:(%f)][(%f):%s:(%f)]' % (time1,sub,time2,lat1,sub,lat2,lon1,sub,lon2) 
    
    base_url = 'http://polarwatch.noaa.gov/erddap/griddap/'+ dataset_id +'.nc?'
    url = base_url + query
    print(url)
    datasetObj = {
        "url" : url
        }
    
    file = 'dataset.nc'

    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    try:
        r = http.request('GET', url, preload_content=False)
    except urllib3.exceptions.HTTPError as he:          
        # Update config file to show that the dataset is not valid 
        datasetObj["inERDDAP"] = 0
        datasetObj["inERDDAPReason"] = "Could not access actual data when trying to make plots"
        entry["validDatasets"][subEntry] = 0
        logger.error("  * The server couldn't fulfill the request. Adding a not valid flag and not updating graphics today.")
    except urllib3.exceptions.URLError as e:
        logger.error('  * Failed to reach erddap server. Stopping.')
        logger.error('  * reason: ', e.reason)
        datasetObj["inERDDAP"] = 0
        datasetObj["inERDDAPReason"] = "Failed to reach ERDDAP when trying to make plots"
        entry["validDatasets"][subEntry] = 0
    except Exception:
        logger.error('generic exception: ' + traceback.format_exc())
        datasetObj["inERDDAP"] = 0
        datasetObj["inERDDAPReason"] = "General Error when trying to make plots (likely timeout)"
    else:
        # update the file with the new data
        with open(file, 'wb') as out:
            while True:
                data = r.read(1024*1024)
                if not data:
                    break
                out.write(data)

        r.release_conn()
        print ('Satellite Data File Retrieved')

        #Check netcdf response
        try:
            data_netcdf_dataset = netcdf_dataset(file)
        except Exception:
            print('an unusual error has occurred')
            datasetObj["inERDDAP"] = 0
            datasetObj["inERDDAPReason"] = "Valid NetCDF not returned in make images"
            getDataError = 'generic exception: ' + traceback.format_exc()
            datasetObj["goodFile"] = 0
            datasetObj["error"] = getDataError
            datasetObj["data_netcdf_dataset"] = 0
        else:    
            print('retrieved valid netcdf')
            datasetObj["data_netcdf_dataset"] = data_netcdf_dataset
            datasetObj["goodFile"] = 1
            datasetObj["inERDDAP"] = 1


    return datasetObj   
Example #27
def getClosestIceMask(logger, date_str):

    print('Date of data: '+ date_str)
    
    date_datetime = dateutil.parser.parse(date_str)
    date_datetime = date_datetime.replace(tzinfo=timezone('UTC'))
    searchStartDate_datetime = date_datetime - timedelta(days=33)
    searchStartDate_str = searchStartDate_datetime.strftime("%Y-%m-%dT%I:%M:%SZ")
    searchEndDate_datetime = date_datetime + timedelta(days=33)
    searchEndDate_str = searchEndDate_datetime.strftime("%Y-%m-%dT%I:%M:%SZ")
    
    # See if we need to modify these times based 
    # on the extent of the ice mask dataset time range
    # because erddap throws an error if the date is too 
    # far into the future outside of the range of the dataset

    maskInfoUrl = 'https://polarwatch.noaa.gov/erddap/info/jplMURSST41/index.csv'
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    
    try:
        r = http.request('GET', maskInfoUrl, preload_content=False, timeout=20)
    except:
        logger.error("  * Error: Retrieving ice mask info file")
    else:
        reader = csv.reader(io.StringIO(r.read().decode('utf-8')), delimiter=',')
        for row in reader:
            if row[0] == "attribute" and row[2] == "time_coverage_end":
                maxIceMaskDate_datetime = dateutil.parser.parse(row[4])
                maxIceMaskDate_datetime = maxIceMaskDate_datetime.replace(tzinfo=timezone('UTC'))
                # compare times, replace if needed
                if maxIceMaskDate_datetime < searchEndDate_datetime:
                    print('replacing')
                    searchEndDate_str = maxIceMaskDate_datetime.strftime("%Y-%m-%dT%I:%M:%SZ")

    # create a plus or minus two months on that date to grab a limited section of the times
    # get ice mask times from erddap so we can search and find the closest one

    iceMaskTimesUrl = 'https://polarwatch.noaa.gov/erddap/griddap/jplMURSST41.json?time[(' + searchStartDate_str + '):1:(' + searchEndDate_str+ ')]'                                      

    try:
        response = http.request('GET', iceMaskTimesUrl, timeout=15)
    except urllib3.exceptions.HTTPError as e:
        logger.error("The server couldn't fulfill the request.")
        logger.error('Error Code: ', e.code)
    except urllib3.exceptions.URLError as e:
        logger.error('Failed to reach erddap server for: '+ iceMaskTimesUrl)
        logger.error('reason: ', e.reason)
    else:
        timesResponse = json.loads(response.data.decode('utf-8'))
        try:
            timesResponse = json.loads(response.data.decode('utf-8'))
        except:
            print("couldn't read json")
            gotTimes=0
        else:
            timeList_datetime = []
            for val in timesResponse['table']['rows']:
                val_datetime = dateutil.parser.parse(val[0])
                val_datetime = val_datetime.replace(tzinfo=timezone('UTC'))
                timeList_datetime.append(val_datetime)
            gotTimes =1
                
    # find the closest time
    if gotTimes == 1 :    
        timeList_datetime = np.array(timeList_datetime)    
        timeList_datetime.sort()
        datematch = np.searchsorted(timeList_datetime, date_datetime )  
        closest_date_str = timeList_datetime[datematch-1].strftime("%Y-%m-%dT%I:%M:%SZ")
        print('Date of ice mask request: '+ closest_date_str)
        
        #now request that mask
        maskUrl = 'https://polarwatch.noaa.gov/erddap/griddap/jplMURSST41.nc?mask[('+closest_date_str+'):1:('+closest_date_str+')][(-89.990000):35:(89.990000)][(-179.990000):71:(180.000000)]'
        print(maskUrl)
        try:
            r = http.request('GET', maskUrl, preload_content=False, timeout=70)
        except urllib3.exceptions.HTTPError as he:
            print("HTTP ERROR: the server couldn't fulfill the request")
            print(he)
        except urllib3.exceptions.URLError as e:
            logger.error('  * Failed to reach erddap server. Stopping.')
            logger.error('  * reason: ', e.reason)
        except Exception:
            logger.error('generic exception: ' + traceback.format_exc())
        else:
            with open('icemask.nc', 'wb') as out:
                while True:
                    icedata = r.read(1024*1024)
                    if not icedata:
                        break
                    out.write(icedata)
            r.release_conn()

        # Try opening the returned mask data file to confirm we retrieved valid data
        maskNetCDF = netcdf_dataset('icemask.nc')

        return maskNetCDF
Example #28
import os
import matplotlib.pyplot as plt
from netCDF4 import Dataset as netcdf_dataset
import numpy as np

from cartopy import config
import cartopy.crs as ccrs

dataset = netcdf_dataset('data.nc')
dimensions = dataset.variables.keys()

sst = dataset.variables['u10'][0, :, :]
lats = dataset.variables['latitude'][:]
lons = dataset.variables['longitude'][:]

print(sst, lats, lons)
ax = plt.axes(projection=ccrs.PlateCarree())

plt.contourf(lons, lats, sst, 60, transform=ccrs.PlateCarree())

ax.coastlines()

plt.show()
Example #29
def ohc_timeseries(config, streamMap=None, variableMap=None):
    """
    Performs analysis of ocean heat content (OHC) from time-series output.
    config is an instance of an MpasAnalysisConfigParser containing
    configuration options.

    config is an instance of MpasAnalysisConfigParser containing configuration
    options.

    If present, streamMap is a dictionary of MPAS-O stream names that map to
    their mpas_analysis counterparts.

    If present, variableMap is a dictionary of MPAS-O variable names that map
    to their mpas_analysis counterparts.

    Author: Xylar Asay-Davis, Milena Veneziani
    Last Modified: 12/04/2016
    """

    # read parameters from config file
    indir = config.get('paths', 'archive_dir_ocn')

    namelist_filename = config.get('input', 'ocean_namelist_filename')
    namelist = NameList(namelist_filename, path=indir)

    streams_filename = config.get('input', 'ocean_streams_filename')
    streams = StreamsFile(streams_filename, streamsdir=indir)

    # Note: input file, not a mesh file because we need dycore specific fields
    # such as refBottomDepth and namelist fields such as config_density0 that
    # are not guaranteed to be in the mesh file.
    inputfile = streams.readpath('input')[0]

    # get a list of timeSeriesStats output files from the streams file,
    # reading only those that are between the start and end dates
    startDate = config.get('time', 'timeseries_start_date')
    endDate = config.get('time', 'timeseries_end_date')
    streamName = streams.find_stream(streamMap['timeSeriesStats'])
    infiles = streams.readpath(streamName, startDate=startDate,
                               endDate=endDate)
    print('Reading files {} through {}'.format(infiles[0], infiles[-1]))

    casename = config.get('case', 'casename')
    ref_casename_v0 = config.get('case', 'ref_casename_v0')
    indir_v0data = config.get('paths', 'ref_archive_v0_ocndir')

    compare_with_obs = config.getboolean('ohc_timeseries', 'compare_with_obs')

    plots_dir = config.get('paths', 'plots_dir')

    yr_offset = config.getint('time', 'yr_offset')

    N_movavg = config.getint('ohc_timeseries', 'N_movavg')

    regions = config.getExpression('regions', 'regions')
    plot_titles = config.getExpression('regions', 'plot_titles')
    iregions = config.getExpression('ohc_timeseries', 'regionIndicesToPlot')

    # Define/read in general variables
    print "  Read in depth and compute specific depth indexes..."
    f = netcdf_dataset(inputfile, mode='r')
    # reference depth [m]
    depth = f.variables["refBottomDepth"][:]
    # specific heat [J/(kg*degC)]
    cp = namelist.getfloat('config_specific_heat_sea_water')
    # [kg/m3]
    rho = namelist.getfloat('config_density0')
    fac = 1e-22*rho*cp

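    # k700m / k2000m: indices of the deepest layers lying entirely above 700 m and 2000 m, respectively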
    k700m = np.where(depth > 700.)[0][0] - 1
    k2000m = np.where(depth > 2000.)[0][0] - 1

    kbtm = len(depth)-1

    # Load data
    print "  Load ocean data..."
    varList = ['avgLayerTemperature',
               'sumLayerMaskValue',
               'avgLayerArea',
               'avgLayerThickness']
    ds = xr.open_mfdataset(
        infiles,
        preprocess=lambda x: preprocess_mpas(x,
                                             yearoffset=yr_offset,
                                             timestr='Time',
                                             onlyvars=varList,
                                             varmap=variableMap))

    ds = remove_repeated_time_index(ds)

    # convert the start and end dates to datetime objects using
    # the Date class, which ensures the results are within the
    # supported range
    time_start = Date(startDate).to_datetime(yr_offset)
    time_end = Date(endDate).to_datetime(yr_offset)
    # select only the data in the specified range of years
    ds = ds.sel(Time=slice(time_start, time_end))

    # Select year-1 data and average it (for later computing anomalies)
    time_start = datetime.datetime(time_start.year, 1, 1)
    time_end = datetime.datetime(time_start.year, 12, 31)
    ds_yr1 = ds.sel(Time=slice(time_start, time_end))
    mean_yr1 = ds_yr1.mean('Time')

    print "  Compute temperature anomalies..."
    avgLayerTemperature = ds.avgLayerTemperature
    avgLayerTemperature_yr1 = mean_yr1.avgLayerTemperature

    avgLayTemp_anomaly = avgLayerTemperature - avgLayerTemperature_yr1

    year_start = (pd.to_datetime(ds.Time.min().values)).year
    year_end = (pd.to_datetime(ds.Time.max().values)).year
    time_start = datetime.datetime(year_start, 1, 1)
    time_end = datetime.datetime(year_end, 12, 31)

    if ref_casename_v0 != "None":
        print "  Load in OHC for ACMEv0 case..."
        infiles_v0data = "{}/OHC.{}.year*.nc".format(
            indir_v0data, ref_casename_v0)
        ds_v0 = xr.open_mfdataset(
            infiles_v0data,
            preprocess=lambda x: preprocess_mpas(x, yearoffset=yr_offset))
        ds_v0 = remove_repeated_time_index(ds_v0)
        ds_v0_tslice = ds_v0.sel(Time=slice(time_start, time_end))

    sumLayerMaskValue = ds.sumLayerMaskValue
    avgLayerArea = ds.avgLayerArea
    avgLayerThickness = ds.avgLayerThickness

    print "  Compute OHC and make plots..."
    for index in range(len(iregions)):
        iregion = iregions[index]

        # Compute volume of each layer in the region:
        layerArea = sumLayerMaskValue[:, iregion, :] * \
            avgLayerArea[:, iregion, :]
        layerVolume = layerArea * avgLayerThickness[:, iregion, :]

        # Compute OHC:
        ohc = layerVolume * avgLayTemp_anomaly[:, iregion, :]
        # OHC over 0-bottom depth range:
        ohc_tot = ohc.sum('nVertLevels')
        ohc_tot = fac*ohc_tot

        # OHC over 0-700m depth range:
        ohc_700m = fac*ohc[:, 0:k700m].sum('nVertLevels')

        # OHC over 700m-2000m depth range:
        ohc_2000m = fac*ohc[:, k700m+1:k2000m].sum('nVertLevels')

        # OHC over 2000m-bottom depth range:
        ohc_btm = ohc[:, k2000m+1:kbtm].sum('nVertLevels')
        ohc_btm = fac*ohc_btm

        title = 'OHC, {}, 0-bottom (thick-), 0-700m (thin-), 700-2000m (--),' \
            ' 2000m-bottom (-.) \n {}'.format(plot_titles[iregion], casename)

        xlabel = "Time [years]"
        ylabel = "[x$10^{22}$ J]"

        if ref_casename_v0 != "None":
            figname = "{}/ohc_{}_{}_{}.png".format(plots_dir,
                                                   regions[iregion],
                                                   casename,
                                                   ref_casename_v0)
            ohc_v0_tot = ds_v0_tslice.ohc_tot
            ohc_v0_700m = ds_v0_tslice.ohc_700m
            ohc_v0_2000m = ds_v0_tslice.ohc_2000m
            ohc_v0_btm = ds_v0_tslice.ohc_btm
            title = "{} (r), {} (b)".format(title, ref_casename_v0)
            timeseries_analysis_plot(config, [ohc_tot, ohc_700m, ohc_2000m,
                                              ohc_btm, ohc_v0_tot, ohc_v0_700m,
                                              ohc_v0_2000m, ohc_v0_btm],
                                     N_movavg, title, xlabel, ylabel, figname,
                                     lineStyles=['r-', 'r-', 'r--', 'r-.',
                                                 'b-', 'b-', 'b--', 'b-.'],
                                     lineWidths=[2, 1, 1.5, 1.5, 2, 1, 1.5,
                                                 1.5])

        if not compare_with_obs and ref_casename_v0 == "None":
            figname = "{}/ohc_{}_{}.png".format(plots_dir, regions[iregion],
                                                casename)
            timeseries_analysis_plot(config, [ohc_tot, ohc_700m, ohc_2000m,
                                              ohc_btm],
                                     N_movavg, title, xlabel, ylabel, figname,
                                     lineStyles=['r-', 'r-', 'r--', 'r-.'],
                                     lineWidths=[2, 1, 1.5, 1.5])
Example #30
def ohc_timeseries(config, streamMap=None, variableMap=None):
    """
    Performs analysis of ocean heat content (OHC) from time-series output.
    config is an instance of an MpasAnalysisConfigParser containing
    configuration options.

    config is an instance of MpasAnalysisConfigParser containing configuration
    options.

    If present, streamMap is a dictionary of MPAS-O stream names that map to
    their mpas_analysis counterparts.

    If present, variableMap is a dictionary of MPAS-O variable names that map
    to their mpas_analysis counterparts.

    Author: Xylar Asay-Davis, Milena Veneziani
    Last Modified: 01/07/2017
    """

    # read parameters from config file
    casename = config.get('case', 'casename')
    ref_casename_v0 = config.get('case', 'ref_casename_v0')
    indir_v0data = config.get('paths', 'ref_archive_v0_ocndir')

    compare_with_obs = config.getboolean('ohc_timeseries', 'compare_with_obs')

    plots_dir = config.get('paths', 'plots_dir')

    yr_offset = config.getint('time', 'yr_offset')

    N_movavg = config.getint('ohc_timeseries', 'N_movavg')

    regions = config.getExpression('regions', 'regions')
    plot_titles = config.getExpression('regions', 'plot_titles')
    iregions = config.getExpression('ohc_timeseries', 'regionIndicesToPlot')

    indir = config.get('paths', 'archive_dir_ocn')

    namelist_filename = config.get('input', 'ocean_namelist_filename')
    namelist = NameList(namelist_filename, path=indir)

    streams_filename = config.get('input', 'ocean_streams_filename')
    streams = StreamsFile(streams_filename, streamsdir=indir)

    # Note: use a restart file, not a mesh file, because we need dycore-specific
    # fields such as refBottomDepth, namelist fields such as config_density0, and
    # simulationStartTime, none of which are guaranteed to be in the mesh file.
    try:
        inputfile = streams.readpath('restart')[0]
    except ValueError:
        raise IOError(
            'No MPAS-O restart file found: need at least one restart file for OHC calculation'
        )

    # get a list of timeSeriesStats output files from the streams file,
    # reading only those that are between the start and end dates
    startDate = config.get('time', 'timeseries_start_date')
    endDate = config.get('time', 'timeseries_end_date')
    streamName = streams.find_stream(streamMap['timeSeriesStats'])
    infiles = streams.readpath(streamName,
                               startDate=startDate,
                               endDate=endDate)
    print('Reading files {} through {}'.format(infiles[0], infiles[-1]))

    # Define/read in general variables
    print('  Read in depth and compute specific depth indexes...')
    f = netcdf_dataset(inputfile, mode='r')
    # reference depth [m]
    depth = f.variables['refBottomDepth'][:]
    # simulation start time
    simStartTime = netCDF4.chartostring(f.variables['simulationStartTime'][:])
    simStartTime = str(simStartTime)
    f.close()
    # specific heat [J/(kg*degC)]
    cp = namelist.getfloat('config_specific_heat_sea_water')
    # [kg/m3]
    rho = namelist.getfloat('config_density0')
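    # scale factor so the rho*cp*volume*deltaT sums come out in units of 10^22 J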
    fac = 1e-22 * rho * cp

    k700m = np.where(depth > 700.)[0][0] - 1
    k2000m = np.where(depth > 2000.)[0][0] - 1

    kbtm = len(depth) - 1

    # Load data
    print('  Load ocean data...')
    varList = [
        'avgLayerTemperature', 'sumLayerMaskValue', 'avgLayerArea',
        'avgLayerThickness'
    ]
    ds = xr.open_mfdataset(
        infiles,
        preprocess=lambda x: preprocess_mpas(x,
                                             yearoffset=yr_offset,
                                             timestr='Time',
                                             onlyvars=varList,
                                             varmap=variableMap))

    ds = remove_repeated_time_index(ds)

    # convert the start and end dates to datetime objects using
    # the Date class, which ensures the results are within the
    # supported range
    time_start = Date(startDate).to_datetime(yr_offset)
    time_end = Date(endDate).to_datetime(yr_offset)
    # select only the data in the specified range of years
    ds = ds.sel(Time=slice(time_start, time_end))

    # Select year-1 data and average it (for later computing anomalies)
    time_start_yr1 = Date(simStartTime).to_datetime(yr_offset)
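    # if the simulation started before the requested window, read year 1 directly
    # from the stream; otherwise reuse the first year of the already-loaded data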
    if time_start_yr1 < time_start:
        startDate_yr1 = simStartTime
        endDate_yr1 = startDate_yr1[0:5] + '12-31' + startDate_yr1[10:]
        infiles_yr1 = streams.readpath(streamName,
                                       startDate=startDate_yr1,
                                       endDate=endDate_yr1)
        ds_yr1 = xr.open_mfdataset(
            infiles_yr1,
            preprocess=lambda x: preprocess_mpas(x,
                                                 yearoffset=yr_offset,
                                                 timestr='Time',
                                                 onlyvars=varList,
                                                 varmap=variableMap))

        ds_yr1 = remove_repeated_time_index(ds_yr1)
    else:
        time_start = datetime.datetime(time_start.year, 1, 1)
        time_end = datetime.datetime(time_start.year, 12, 31)
        ds_yr1 = ds.sel(Time=slice(time_start, time_end))
    mean_yr1 = ds_yr1.mean('Time')

    print('  Compute temperature anomalies...')
    avgLayerTemperature = ds.avgLayerTemperature
    avgLayerTemperature_yr1 = mean_yr1.avgLayerTemperature

    avgLayTemp_anomaly = avgLayerTemperature - avgLayerTemperature_yr1

    year_start = (pd.to_datetime(ds.Time.min().values)).year
    year_end = (pd.to_datetime(ds.Time.max().values)).year
    time_start = datetime.datetime(year_start, 1, 1)
    time_end = datetime.datetime(year_end, 12, 31)

    if ref_casename_v0 != 'None':
        print('  Load in OHC for ACMEv0 case...')
        infiles_v0data = '{}/OHC.{}.year*.nc'.format(indir_v0data,
                                                     ref_casename_v0)
        ds_v0 = xr.open_mfdataset(
            infiles_v0data,
            preprocess=lambda x: preprocess_mpas(x, yearoffset=yr_offset))
        ds_v0 = remove_repeated_time_index(ds_v0)
        year_end_v0 = (pd.to_datetime(ds_v0.Time.max().values)).year
        if year_start <= year_end_v0:
            ds_v0_tslice = ds_v0.sel(Time=slice(time_start, time_end))
        else:
            print('   Warning: v0 time series lies outside current bounds of v1 time series. Skipping it.')
            ref_casename_v0 = 'None'

    sumLayerMaskValue = ds.sumLayerMaskValue
    avgLayerArea = ds.avgLayerArea
    avgLayerThickness = ds.avgLayerThickness

    print('  Compute OHC and make plots...')
    for index in range(len(iregions)):
        iregion = iregions[index]

        # Compute volume of each layer in the region:
        layerArea = sumLayerMaskValue[:, iregion, :] * \
            avgLayerArea[:, iregion, :]
        layerVolume = layerArea * avgLayerThickness[:, iregion, :]

        # Compute OHC:
        ohc = layerVolume * avgLayTemp_anomaly[:, iregion, :]
        # OHC over 0-bottom depth range:
        ohc_tot = ohc.sum('nVertLevels')
        ohc_tot = fac * ohc_tot

        # OHC over 0-700m depth range:
        ohc_700m = fac * ohc[:, 0:k700m].sum('nVertLevels')

        # OHC over 700m-2000m depth range:
        ohc_2000m = fac * ohc[:, k700m + 1:k2000m].sum('nVertLevels')

        # OHC over 2000m-bottom depth range:
        ohc_btm = ohc[:, k2000m + 1:kbtm].sum('nVertLevels')
        ohc_btm = fac * ohc_btm

        title = 'OHC, {}, 0-bottom (thick-), 0-700m (thin-), 700-2000m (--),' \
                ' 2000m-bottom (-.) \n {}'.format(plot_titles[iregion], casename)

        xlabel = 'Time [years]'
        ylabel = '[x$10^{22}$ J]'

        if ref_casename_v0 != 'None':
            figname = '{}/ohc_{}_{}_{}.png'.format(plots_dir, regions[iregion],
                                                   casename, ref_casename_v0)
            ohc_v0_tot = ds_v0_tslice.ohc_tot
            ohc_v0_700m = ds_v0_tslice.ohc_700m
            ohc_v0_2000m = ds_v0_tslice.ohc_2000m
            ohc_v0_btm = ds_v0_tslice.ohc_btm
            title = '{} (r), {} (b)'.format(title, ref_casename_v0)
            timeseries_analysis_plot(
                config, [
                    ohc_tot, ohc_700m, ohc_2000m, ohc_btm, ohc_v0_tot,
                    ohc_v0_700m, ohc_v0_2000m, ohc_v0_btm
                ],
                N_movavg,
                title,
                xlabel,
                ylabel,
                figname,
                lineStyles=[
                    'r-', 'r-', 'r--', 'r-.', 'b-', 'b-', 'b--', 'b-.'
                ],
                lineWidths=[2, 1, 1.5, 1.5, 2, 1, 1.5, 1.5])

        if not compare_with_obs and ref_casename_v0 == 'None':
            figname = '{}/ohc_{}_{}.png'.format(plots_dir, regions[iregion],
                                                casename)
            timeseries_analysis_plot(config,
                                     [ohc_tot, ohc_700m, ohc_2000m, ohc_btm],
                                     N_movavg,
                                     title,
                                     xlabel,
                                     ylabel,
                                     figname,
                                     lineStyles=['r-', 'r-', 'r--', 'r-.'],
                                     lineWidths=[2, 1, 1.5, 1.5])
Example #31
#from get_parameters import *

import numpy as np
from netCDF4 import Dataset as netcdf_dataset

# scipy
from scipy import stats

# data path
ctl_name = "CESM2"  #os.environ["ctl_name"]
exp_name = "TSIS-1"  #os.environ["exp_name"]
ctl_pref = "solar_CTL_cesm211_ETEST-f19_g17-ens_mean_2010-2019"
#exp_pref="solar_TSIS_cesm211_ETEST-f19_g17-ens_mean_2010-2019"

fpath_ctl = "/Volumes/WD4T_1/cesm2_solar_exp/" + ctl_pref + "/climo/"
#fpath_exp="/raid00/xianwen/data/cesm211_solar_exp/"+exp_pref+"/climo/"

fctl = fpath_ctl + "solar_CTL_cesm211_ETEST-f19_g17-ens_mean_2010-2019_climo_ANN.nc"
file_ctl = netcdf_dataset(fctl, "r")
#years=np.arange(2010,2020)
#months_all=["01","02","03","04","05","06","07","08","09","10","11","12"]

#varnm="ICEFRAC"

varnm = "FSSUS10"  # FSSUS10, FSSUS08
varnm2 = "FSSDS10"  # FSSDS10, FSSDS08
varnm3 = "FSSUS08"  # FSSUS10, FSSUS08
varnm4 = "FSSDS08"  # FSSDS10, FSSDS08
#varnm_off="FLUTC_OFF"  #offline computation
#units=r"W/m$^2$"
units = ""
figure_name = "lat_lon_albedo_VIS"

dtctl = file_ctl.variables[varnm][0, :, :] / file_ctl.variables[varnm2][0, :, :]
nlev = 32

means_yby_ctl = np.zeros(
    (years.size, nlat, nlon))  #year by year mean for each variable
means_yby_exp = np.zeros(
    (years.size, nlat, nlon))  #year by year mean for each variable
means_ctl = np.zeros((nlat, nlon))  #year by year mean for each variable
means_exp = np.zeros((nlat, nlon))  #year by year mean for each variable
diffs = np.zeros((nlat, nlon))  #multi-year exp-ctl diff for each variable
pvals = np.zeros((nlat, nlon))  #pvalues of ttest

for iy in range(0, years.size):
    # open data file
    fctl = fpath_ctl + ctl_pref + "_" + season + "_" + str(years[iy]) + ".nc"
    fexp = fpath_exp + exp_pref + "_" + season + "_" + str(years[iy]) + ".nc"
    file_ctl = netcdf_dataset(fctl, "r")
    file_exp = netcdf_dataset(fexp, "r")

    # read lat and lon
    lat = file_ctl.variables["lat"]
    lon = file_ctl.variables["lon"]

    means_yby_ctl[iy, :, :] = file_ctl.variables[varnm][0, :, :]
    means_yby_exp[iy, :, :] = file_exp.variables[varnm][0, :, :]

means_ctl[:, :] = np.mean(means_yby_ctl, axis=0)
means_exp[:, :] = np.mean(means_yby_exp, axis=0)
diffs = means_exp - means_ctl
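# `pvals` is allocated above for a t-test, but its computation is not shown in
# this excerpt; a plausible sketch (an assumption, not the original code) using
# scipy.stats.ttest_ind on the year-by-year means:
tstats, pvals = stats.ttest_ind(means_yby_exp, means_yby_ctl, axis=0)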

if pole == "N":
    latbound1 = np.min(np.where(lat[:] > 55))
Example #33
# You can reduce the resolution of the data returned from the server
# This can be helpful during testing if the dataset is very large
# Set this to one for full resolution, two for half, and so on
req["sub"] = '4'
print(req["sub"])

# GET DATA
# m0 = datetime.now()

#dataset = getData(dataset_id, parameter, qbounds, '', req["sub"],basemapImageDir)

file = basemapImageDir + '/' + parameter + '.nc'
datafile = netcdf_dataset(file)
dataset = {"data": datafile}

dataObj = dataset['data']

# check data file for expected altitude dimension response
# pull out parameter data and x, y
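# 3-D variables -> (time, y, x); 4-D -> (time, altitude, y, x); 2-D -> (y, x)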
if len(np.shape(dataObj.variables[parameter])) == 3:
    data = dataObj.variables[parameter][0, :, :]
elif len(np.shape(dataObj.variables[parameter])) == 4:
    data = dataObj.variables[parameter][0, 0, :, :]
elif len(np.shape(dataObj.variables[parameter])) == 2:
    data = dataObj.variables[parameter][:, :]

xgrid = dataObj.variables[xcoord_dimension["name"]][:]
ygrid = dataObj.variables[ycoord_dimension["name"]][:]
Example #34
import os
import matplotlib.pyplot as plt
from netCDF4 import Dataset as netcdf_dataset
import numpy as np

import cartopy.crs as ccrs


# get the path of the file. It can be found in the data directory, conveniently
# at the same level as the cartopy/crs.py file.
fname = os.path.join(os.path.dirname(ccrs.__file__),
                     'data', 'netcdf', 'HadISST1_SST_update.nc'
                     )

dataset = netcdf_dataset(fname)
sst = dataset.variables['sst'][0, :, :]
lats = dataset.variables['lat'][:]
lons = dataset.variables['lon'][:]

ax = plt.axes(projection=ccrs.PlateCarree())

plt.contourf(lons, lats, sst, 60,
             transform=ccrs.PlateCarree())

ax.coastlines()

plt.show()
Example #35
        clevsDiff = config.getExpression('seaice_modelvsobs',
                                         'clevsDiff_conc_{}'.format(season))
        cmap = plt.get_cmap(config.get('seaice_modelvsobs', 'cmapDiff'))
        cmapIndices = config.getExpression('seaice_modelvsobs',
                                           'cmapIndicesDiff')
        cmapDiff = cols.ListedColormap(cmap(cmapIndices), "cmapDiff")

        lon0 = config.getfloat('seaice_modelvsobs',
                               'lon0_{}'.format(hemisphere))
        latmin = config.getfloat('seaice_modelvsobs',
                                 'latmin_{}'.format(hemisphere))

        # Load in sea-ice data
        #  Model...
        # ice concentrations
        fileName = "{}/{}".format(climodir_regridded, climofiles[climName])
        f = netcdf_dataset(fileName, mode='r')
        iceconc = f.variables["iceAreaCell"][:]
        if first:
            lons = f.variables["lon"][:]
            lats = f.variables["lat"][:]
            print "Min lon: ", np.amin(lons), "Max lon: ", np.amax(lons)
            print "Min lat: ", np.amin(lats), "Max lat: ", np.amax(lats)
            Lons, Lats = np.meshgrid(lons, lats)
            first = False
        f.close()

        #  ...and observations
        # ice concentrations from NASATeam (or Bootstrap) algorithm
        for obsName in ['NASATeam', 'Bootstrap']:

            fileName = obs_iceconc_filenames['{}_{}'.format(climName, obsName)]