def test_read_example_data():
    # read any example data files
    for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
        f = netcdf_file(fname, 'r')
        f.close()
        f = netcdf_file(fname, 'r', mmap=False)
        f.close()
Example 2
def get_data(dir, run_id = 'def', plot = False):
    #if 'inputs' not in locals():
    inputs = netcdf.netcdf_file(dir + '{}_inputs.cdf'.format(run_id),'r',mmap = False)
    spectra = netcdf.netcdf_file(dir + '{}_spectra.cdf'.format(run_id),'r',mmap = False)
    #neutrals = netcdf.netcdf_file(dir + '{}_neutrals.cdf'.format(run_id),'r',mmap = False)
    #weights = netcdf.netcdf_file(dir + '{}_fida_weights.cdf'.format(run_id),'r',mmap = False)
    neutrals=weights=None
    fida = +spectra.variables['fida'].data
    wave = +spectra.variables['lambda'].data
    halo = +spectra.variables['halo'].data
    BE = [+spectra.variables['full'].data, +spectra.variables['half'].data, +spectra.variables['third'].data]
    if plot:
        fig, ax = pt.subplots(nrows = len(BE)+1)
        for i in range(fida.shape[0]):
            for ax_tmp, dat_tmp in zip(ax,BE):
                ax_tmp.plot(wave,dat_tmp[i,:])
            ax[len(BE)].plot(wave,halo[i,:])
        fig.canvas.draw();fig.show()
    #    ax[0].plot(fida[i,:])
    i = np.argmin(np.abs([np.mean(inputs.variables['z_grid'].data[i,:,:]) for i in range(inputs.variables['z_grid'].data.shape[0])]))
    z = inputs.variables['z_grid'].data[i,:,:]
    x_grid = inputs.variables['x_grid'].data[i,:,:]
    y_grid = inputs.variables['y_grid'].data[i,:,:]
    # The plotting branch below needs the neutrals file; uncomment its netcdf_file
    # call above to populate it.
    if plot and neutrals is not None:
        dat_grid = neutrals.variables['halodens'].data[0,i,:,:]
        n_halos = neutrals.variables['halodens'].shape[0]
        fig, ax = pt.subplots(nrows = n_halos)
        for j in range(n_halos):
            im = ax[j].pcolormesh(x_grid, y_grid, neutrals.variables['halodens'].data[j,i,:,:])
            pt.colorbar(im,ax=ax[j])
        fig.canvas.draw();fig.show()
    return inputs, neutrals, spectra, weights
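A minimal usage sketch for the function above; the result directory and run id are hypothetical, and neutrals/weights stay None unless their loading lines are uncommented:

inputs, neutrals, spectra, weights = get_data('/path/to/fidasim/results/', run_id='def')
wave = spectra.variables['lambda'].data  # wavelength axis used by the spectral arrays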
def plot_arm_speed(axis, startTime=-1):
    startSample = 0  # samples to skip at the start (originally an undefined name)
    rootName = 'siemensSensors'
    f = netcdf.netcdf_file(rootName+'Data.nc', 'r')
    data1 = f.variables[rootName+'.data.'+'carouselSpeedSetpoint'].data[startSample:]
    data2 = f.variables[rootName+'.data.'+'carouselSpeedSmoothed'].data[startSample:]
    ts_trigger = f.variables[rootName+'.data.ts_trigger'].data[startSample:]*1.0e-9

    # Load the actual arm speed from the arm gyro
    rootName = 'armboneLisaSensors'
    fiile = netcdf.netcdf_file(rootName+'Data.nc', 'r')

    rawdata4 = fiile.variables['armboneLisaSensors.GyroState.gr'].data[startSample:]
    ts_trigger4 = fiile.variables['armboneLisaSensors.GyroState.ts_trigger'].data[startSample:]*1.0e-9
    #fullscale = 2000 # deg/sec
    #data4 = -1.0 * rawdata4 / (2**15) * fullscale * pi/180 - 0.0202 # Rad/s
    data4 = rawdata4

    if startTime == -1:
        startTime = ts_trigger[0]

    times = ts_trigger-startTime
    times4 = ts_trigger4-startTime

    # pylab.hold(True)  # hold is the default; pylab.hold was removed in matplotlib >= 3.0

    plot(times, data2, '.-', label='On Motor Side of Belt')
    plot(times4, data4,'.-',  label='From Gyro on Arm')
    plot(times, data1, '.-', label='Setpoint (Echoed)')
    ylabel('Arm rotation speed [Rad/s]')
    xlabel('Time [s]')
    #legend(['Setpoint (Echoed)', 'Setpoint (Sent)', 'On Motor Side of Belt', 'From Gyro on Arm'])
    title('Plot of Signals Related to Arm Speed')
    return startTime
def grab_data(rule):
    files = glob.glob(rule)

    filebases = []
    for i in range(0, len(files)):
        #Grab the part after the / of any path, and before the _k*.nc
        filebase = re.split('_', 
                            re.split('/', files[i])[-1]
                            )[0]
        #If we haven't seen this base before, keep it around.
        if filebase not in filebases:
            filebases.append(filebase)
    
    numbases = len(filebases)

    data =  numpy.ones(numbases, dtype = [('omega1', float), ('B', float),
                                          ('kmin', float), ('kmax', float),
                                          ('kpeak', float),
                                          ('peakgr', float)])

    path = ""
    for dir in (re.split('/', files[0])[0:-1]):
        path = path + dir + '/'

    for i in range(0,numbases):
        data[i]['omega1'] = float(re.split('B',
                                           re.split('w', filebases[i])[1])[0])
        data[i]['B'] = float(re.split('B', filebases[i])[1])

        kminname = path + filebases[i] + "_kmin.nc"
        kmaxname = path + filebases[i] + "_kmax.nc"
        kpeakname = path + filebases[i] + "_kpeak.nc"
        
        if(os.path.exists(kminname)):
            ncfile = netcdf.netcdf_file(kminname, 'r')
            data[i]['kmin'] = ncfile.kz
            ncfile.close()
        else:
            data[i]['kmin'] = numpy.nan

        if(os.path.exists(kmaxname)):
            ncfile = netcdf.netcdf_file(kmaxname, 'r')
            data[i]['kmax'] = ncfile.kz
            ncfile.close()
        else:
            data[i]['kmax'] = numpy.nan

        if(os.path.exists(kpeakname)):
            ncfile = netcdf.netcdf_file(kpeakname, 'r')
            data[i]['kpeak'] = ncfile.kz
            data[i]['peakgr'] = ncfile.variables['lambda'][0,0]
            ncfile.close()
        else:
            data[i]['kpeak'] = numpy.nan
            data[i]['peakgr'] = numpy.nan

    #Sort the array so things are nicer
    data = numpy.sort(data, order=['omega1', 'B'])
        
    return data
Example 5
def test_read_example_data():
    # read any example data files
    for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
        with netcdf_file(fname, 'r') as f:
            pass
        with netcdf_file(fname, 'r', mmap=False) as f:
            pass
Example 6
def test_read_write_sio():
    eg_sio1 = BytesIO()
    with make_simple(eg_sio1, 'w') as f1:
        str_val = eg_sio1.getvalue()

    eg_sio2 = BytesIO(str_val)
    with netcdf_file(eg_sio2) as f2:
        check_simple(f2)

    # Test that error is raised if attempting mmap for sio
    eg_sio3 = BytesIO(str_val)
    assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
    # Test 64-bit offset write / read
    eg_sio_64 = BytesIO()
    with make_simple(eg_sio_64, 'w', version=2) as f_64:
        str_val = eg_sio_64.getvalue()

    eg_sio_64 = BytesIO(str_val)
    with netcdf_file(eg_sio_64) as f_64:
        check_simple(f_64)
        assert_equal(f_64.version_byte, 2)
    # also when version 2 explicitly specified
    eg_sio_64 = BytesIO(str_val)
    with netcdf_file(eg_sio_64, version=2) as f_64:
        check_simple(f_64)
        assert_equal(f_64.version_byte, 2)
Example 7
def test_maskandscale():
    t = np.linspace(20, 30, 15)
    t[3] = 100
    tm = np.ma.masked_greater(t, 99)
    fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        Temp = f.variables['Temperature']
        assert_equal(Temp.missing_value, 9999)
        assert_equal(Temp.add_offset, 20)
        assert_equal(Temp.scale_factor, np.float32(0.01))
        found = Temp[:].compressed()
        del Temp  # Remove ref to mmap, so file can be closed.
        expected = np.round(tm.compressed(), 2)
        assert_allclose(found, expected)

    with in_tempdir():
        newfname = 'ms.nc'
        f = netcdf_file(newfname, 'w', maskandscale=True)
        f.createDimension('Temperature', len(tm))
        temp = f.createVariable('Temperature', 'i', ('Temperature',))
        temp.missing_value = 9999
        temp.scale_factor = 0.01
        temp.add_offset = 20
        temp[:] = tm
        f.close()

        with netcdf_file(newfname, maskandscale=True) as f:
            Temp = f.variables['Temperature']
            assert_equal(Temp.missing_value, 9999)
            assert_equal(Temp.add_offset, 20)
            assert_equal(Temp.scale_factor, np.float32(0.01))
            expected = np.round(tm.compressed(), 2)
            found = Temp[:].compressed()
            del Temp
            assert_allclose(found, expected)
def load_steady_state_experiments_data():
    samplesToSkip = 3

    print('loading data...')
    rootName = 'lineAngleSensor2'
    f = netcdf.netcdf_file(rootName+'Data.nc', 'r')
    memberName = 'azimuth'
    azimuth = f.variables[rootName+'.data.'+memberName].data[samplesToSkip:]
    memberName = 'elevation'
    elevation = f.variables[rootName+'.data.'+memberName].data[samplesToSkip:]
    ts_trigger_las = f.variables[rootName+'.data.ts_trigger'].data[samplesToSkip:]*1.0e-9

    rootName = 'siemensSensors'
    f = netcdf.netcdf_file(rootName+'Data.nc', 'r')
    setpoint = f.variables[rootName+'.data.'+'carouselSpeedSetpoint'].data[samplesToSkip:]
    speed = f.variables[rootName+'.data.'+'carouselSpeedSmoothed'].data[samplesToSkip:]
    ts_trigger_siemens = f.variables[rootName+'.data.ts_trigger'].data[samplesToSkip:]*1.0e-9

    # Chose intersection of the two time ranges
    startTime = max(ts_trigger_las[0],ts_trigger_siemens[0])  
    endTime = min(ts_trigger_las[-1],ts_trigger_siemens[-1])
    ts_trigger_las -= startTime
    ts_trigger_siemens -= startTime
    startTime_new = 0
    endTime_new = endTime-startTime

    times = len(ts_trigger_las)+len(ts_trigger_siemens)

    # Since we didn't have the resampler turned on...
    t = numpy.linspace(startTime_new, endTime_new, times)
    elevation_resampled = numpy.interp(t, ts_trigger_las, elevation)
    speed_resampled = numpy.interp(t, ts_trigger_siemens, speed)
    
    return speed_resampled, elevation_resampled
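A hedged follow-on sketch showing how the resampled series might be inspected; it assumes matplotlib is available, that the two *Data.nc files sit in the working directory, and the axis labels are assumptions:

import matplotlib.pyplot as plt

speed, elevation = load_steady_state_experiments_data()
plt.plot(speed, elevation, '.')
plt.xlabel('carousel speed (resampled)')
plt.ylabel('line angle elevation (resampled)')
plt.show()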
def load_timeseries(filenames,region,months,bias):
	data=np.ma.zeros([len(filenames),len(months)*30])
	tmp=np.zeros([len(months)*30])
	i=0
	[j_s,j_e,i_s,i_e]=region
	for f in filenames:
#		print i,os.path.basename(f)
		try:
			f2=f.replace('field16','field16_1')
			for j,monthstr in enumerate(months):
				f_month=f[:-10]+monthstr+'.nc'
				f2_month=f2[:-10]+monthstr+'.nc'
				var1=netcdf_file(f_month,'r').variables['field16'][:,0,4:-7,4:-4]
				var1=np.ma.masked_values(var1,-1.07374e+09)
				var2=netcdf_file(f2_month,'r').variables['field16_1'][:,0,4:-7,4:-4]
				var2=np.ma.masked_values(var2,-1.07374e+09)
				tmp[j*30:(j+1)*30]=((var1+var2)/2.-bias)[:,j_s:j_e,i_s:i_e].mean(1).mean(1)
			
			if tmp.max()>350.0 or tmp.min()<170 or not np.all(np.isfinite(tmp)):
				print('error: weird vals', f)
				continue
			else:
#				print tmp.min(),tmp.max()
				data[i,:]=tmp
				i=i+1
		except:
			print('Error, cannot load files', f)
			raise
			#continue
	return data[:i,:]
Example 10
    def _load_scipy(self, scipy_nc, *args, **kwdargs):
        """
        Interprets a netcdf file-like object using scipy.io.netcdf.
        The contents of the netcdf object are loaded into memory.
        """
        try:
            nc = netcdf.netcdf_file(scipy_nc, mode='r', *args, **kwdargs)
        except:
            scipy_nc = StringIO(scipy_nc)
            scipy_nc.seek(0)
            nc = netcdf.netcdf_file(scipy_nc, mode='r', *args, **kwdargs)

        def from_scipy_variable(sci_var):
            return Variable(dims = sci_var.dimensions,
                            data = sci_var.data,
                            attributes = sci_var._attributes)

        object.__setattr__(self, 'attributes', AttributesDict())
        self.attributes.update(nc._attributes)

        object.__setattr__(self, 'dimensions', OrderedDict())
        dimensions = OrderedDict((k, len(d))
                                 for k, d in nc.dimensions.items())
        self.dimensions.update(dimensions)

        object.__setattr__(self, 'variables', OrderedDict())
        variables = OrderedDict((vn, from_scipy_variable(v))
                                for vn, v in nc.variables.items())
        self.variables.update(variables)
Example 11
def test_read_write_sio():
    eg_sio1 = BytesIO()
    f1 = make_simple(eg_sio1, 'w')
    str_val = eg_sio1.getvalue()
    f1.close()
    eg_sio2 = BytesIO(str_val)
    f2 = netcdf_file(eg_sio2)
    for testargs in gen_for_simple(f2):
        yield testargs
    f2.close()
    # Test that error is raised if attempting mmap for sio
    eg_sio3 = BytesIO(str_val)
    yield assert_raises, ValueError, netcdf_file, eg_sio3, 'r', True
    # Test 64-bit offset write / read
    eg_sio_64 = BytesIO()
    f_64 = make_simple(eg_sio_64, 'w', version=2)
    str_val = eg_sio_64.getvalue()
    f_64.close()
    eg_sio_64 = BytesIO(str_val)
    f_64 = netcdf_file(eg_sio_64)
    for testargs in gen_for_simple(f_64):
        yield testargs
    yield assert_equal, f_64.version_byte, 2
    # also when version 2 explicitly specified
    eg_sio_64 = BytesIO(str_val)
    f_64 = netcdf_file(eg_sio_64, version=2)
    for testargs in gen_for_simple(f_64):
        yield testargs
    yield assert_equal, f_64.version_byte, 2
Example 12
def write_netcdf(path, fields, attrs={}, append=False, format='NETCDF3_64BIT'):
    """
    Write the data and grid information for *fields* to *path* as NetCDF.
    If the *append* keyword argument is True, append the data to an existing
    file, if it exists. Otherwise, clobber any existing file.
    """
    assert(format in _VALID_NETCDF_FORMATS)

    if os.path.isfile(path) and append:
        mode = 'a'
    else:
        mode = 'w'

    if format == 'NETCDF3_CLASSIC':
        root = nc.netcdf_file(path, mode, version=1)
    elif format == 'NETCDF3_64BIT':
        root = nc.netcdf_file(path, mode, version=2)
    else:
        root = nc4.Dataset(path, mode, format=format)

    _set_netcdf_attributes(root, attrs)
    _set_netcdf_structured_dimensions(root, fields.shape)
    _set_netcdf_variables(root, fields)

    root.close()
Example 13
    def get_fidasim_output(self,):
        '''Open the relevant netcdf files

        SRH: 23June2015
        '''
        self.inputs = netcdf.netcdf_file(self.directory + '{}_inputs.cdf'.format(self.run_id),'r',mmap = False)
        self.spectra = netcdf.netcdf_file(self.directory + '{}_spectra.cdf'.format(self.run_id),'r',mmap = False)
        self.neutrals = netcdf.netcdf_file(self.directory + '{}_neutrals.cdf'.format(self.run_id),'r',mmap = False)
Example 14
def WriteTMPave(biofile,physfile, outfile):
            
    nc = NC.netcdf_file(biofile, "r")
    DIMS = nc.dimensions
    jpk = DIMS['depth']
    jpj = DIMS['lat'  ]
    jpi = DIMS['lon'  ]

    
    ncOUT=NC.netcdf_file(outfile,"w")
    setattr(ncOUT,"Convenctions","COARDS")
    setattr(ncOUT,"DateStart",nc.DateStart)
    setattr(ncOUT,"Date__End",nc.Date__End)
    ncOUT.createDimension('time',   1)
    ncOUT.createDimension('lon'  ,jpi)
    ncOUT.createDimension('lat'  ,jpj)
    ncOUT.createDimension('depth',jpk)
    
    for var in ['lon','lat','depth']:
        ncvar=ncOUT.createVariable(var,'f',(var,))
        ncvar[:]=nc.variables[var].data
    nc.close()        
    
    setattr(ncOUT.variables['lon'],"long_name","Longitude")    
    setattr(ncOUT.variables['lat'],"long_name","Latitude")

    
    for var in ['N1p','N3n','O2o']  :
        ncIN = NC.netcdf_file(biofile,"r")      
        ncvar=ncOUT.createVariable(var,'f',('time','depth','lat','lon'))
        ncvar[:]=ncIN.variables[var].data.copy()
        setattr(ncvar,"long_name",var)
        setattr(ncvar,"missing_value",1.e+20)
        ncIN.close()
    for var in ['votemper','vosaline']  :
        ncIN = NC.netcdf_file(physfile,"r")      
        ncvar=ncOUT.createVariable(var,'f',('time','depth','lat','lon'))
        ncvar[:]=ncIN.variables[var].data.copy()
        setattr(ncvar,"long_name",var)
        setattr(ncvar,"missing_value",1.e+20)
        ncIN.close()
    
    AGGREGATE_DICT={'P_l':['P1l','P2l','P3l','P4l']}
    for var in AGGREGATE_DICT.keys():
              
        ncvar=ncOUT.createVariable(var,'f',('time','depth','lat','lon'))
        junk = np.zeros((1,jpk,jpj,jpi),np.float32)
        for lvar in AGGREGATE_DICT[var]:
            ncIN = NC.netcdf_file(biofile,"r")     
            junk +=ncIN.variables[lvar].data.copy()
            ncIN.close()
        tmask= junk > 1.e+19
        junk[tmask] = 1.e+20
        ncvar[:]=junk    
        setattr(ncvar,"long_name",var)
        setattr(ncvar,"missing_value",1.e+20)
    
    ncOUT.close()    
Example 15
def write_2d_file(M2d, varname, outfile, mask, fillValue=1.0e20):
    """
    Dumps a 2D array in a NetCDF file.


    Arguments:
    * M2d       * the 2D array to dump
    * varname   * the variable name on NetCDF file
    * outfile   * file that will be created. If it is an existing file,
                  it will be opened in 'append' mode.
    * mask      * a mask object consistent with M2d array
    * fillValue * (optional) value to set the missing_value attribute.

    When the file is opened in 'append' mode this method tries to adapt to
    existing dimension names (for example it works with either 'lon' or 'longitude').

    Does not return anything."""

    if os.path.exists(outfile):
        ncOUT = NC.netcdf_file(outfile, "a")
        print "appending ", varname, " in ", outfile
    else:
        ncOUT = NC.netcdf_file(outfile, "w")
        jpk, jpj, jpi = mask.shape
        ncOUT.createDimension("longitude", jpi)
        ncOUT.createDimension("latitude", jpj)
        ncOUT.createDimension("depth", jpk)

        ncvar = ncOUT.createVariable("longitude", "f", ("longitude",))
        setattr(ncvar, "units", "degrees_east")
        setattr(ncvar, "long_name", "longitude")
        setattr(ncvar, "standard_name", "longitude")
        setattr(ncvar, "axis", "X")
        setattr(ncvar, "valid_min", -5.5625)
        setattr(ncvar, "valid_max", 36.25)
        setattr(ncvar, "_CoordinateAxisType", "Lon")
        ncvar[:] = mask.xlevels[0, :]

        ncvar = ncOUT.createVariable("latitude", "f", ("latitude",))
        setattr(ncvar, "units", "degrees_north")
        setattr(ncvar, "long_name", "latitude")
        setattr(ncvar, "standard_name", "latitude")
        setattr(ncvar, "axis", "Y")
        setattr(ncvar, "valid_min", 30.1875)
        setattr(ncvar, "valid_max", 45.9375)
        setattr(ncvar, "_CoordinateAxisType", "Lat")
        ncvar[:] = mask.ylevels[:, 0]

    ncvar = ncOUT.createVariable(varname, "f", (lat_dimension_name(ncOUT), lon_dimension_name(ncOUT)))
    setattr(ncvar, "fillValue", fillValue)
    setattr(ncvar, "missing_value", fillValue)
    setattr(ncvar, "coordinates", "latitude longitude")
    ncvar[:] = M2d
    setattr(ncOUT, "latitude_min", 30.0)
    setattr(ncOUT, "latitude_max", 46.0)
    setattr(ncOUT, "longitude_min", -6.0)
    setattr(ncOUT, "longitude_max", 37.0)
    ncOUT.close()
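A usage sketch for write_2d_file, assuming numpy is imported as np and that the lat_dimension_name/lon_dimension_name helpers used above are importable from the same module; FakeMask is a hypothetical stand-in exposing the attributes the function reads (shape, xlevels, ylevels):

class FakeMask:
    # 3D shape (jpk, jpj, jpi) plus 2D coordinate arrays, as write_2d_file expects
    shape = (3, 4, 5)
    xlevels = np.tile(np.linspace(-6.0, 36.25, 5), (4, 1))
    ylevels = np.tile(np.linspace(30.1875, 45.9375, 4), (5, 1)).T

M2d = np.zeros((4, 5), np.float32)
write_2d_file(M2d, 'N1p', 'out2d.nc', FakeMask())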
Example 16
def test_timestep_diff(data, dt, eps=2.e-4):
    """
    checking that the results are close to the reference ones
    (stored in the refdata folder)
    """

    filename = "timesteptest_dt=" + str(dt) + ".nc"
    f_test = netcdf.netcdf_file(filename, "r")
    f_ref  = netcdf.netcdf_file(os.path.join("long_test/refdata", filename), "r")
    for var in ["t", "z", "th_d", "T", "p", "r_v", "rhod"]:
         assert np.isclose(f_test.variables[var][:], f_ref.variables[var][:], atol=0, rtol=eps).all(), "differs e.g. " + str(var) + "; max(ref diff) = " + str(np.where(f_ref.variables[var][:] != 0., abs((f_test.variables[var][:]-f_ref.variables[var][:])/f_ref.variables[var][:]), 0.).max())
def use_netcdf_files():
	nc = netcdf.netcdf_file('/home/nicholas/data/netcdf_files/CFLX_2000_2009.nc', 'r')
	all_data = nc.variables['Cflx'][:, :45, :180]
	nc.close()
	all_data = all_data * 1000 * 24 * 60 * 60
	all_data = ma.masked_values(all_data, 1e20)

	nc = netcdf.netcdf_file('/home/nicholas/data/netcdf_files/ORCA2.0_grid.nc', 'r')
	mask = nc.variables['mask'][0, :45, :180]
	nc.close()
	mask = ma.masked_values(mask, -1e34)
	return all_data, mask
Example 18
def get_los_data(dir='/u/haskeysr/FIDASIM/RESULTS/D3D/155196/00500/MAIN_ION330/', run_id = 'def', plot = False):
    #if 'inputs' not in locals():
    inputs = netcdf.netcdf_file(dir + '{}_inputs.cdf'.format(run_id),'r',mmap = False)
    los_wght = inputs.variables['los_wght'].data
    #spectra = netcdf.netcdf_file(dir + '{}_spectra.cdf'.format(run_id),'r',mmap = False)
    neutrals = netcdf.netcdf_file(dir + '{}_neutrals.cdf'.format(run_id),'r',mmap = False)
    halo_dens = neutrals.variables['halodens'].data
    #weights = netcdf.netcdf_file(dir + '{}_fida_weights.cdf'.format(run_id),'r',mmap = False)
    return halo_dens, los_wght
Example 19
def test_byte_gatts():
    # Check that global "string" atts work like they did before py3k
    # unicode and general bytes confusion
    with in_tempdir():
        filename = 'g_byte_atts.nc'
        f = netcdf_file(filename, 'w')
        f._attributes['holy'] = b'grail'
        f._attributes['witch'] = 'floats'
        f.close()
        f = netcdf_file(filename, 'r')
        assert_equal(f._attributes['holy'], b'grail')
        assert_equal(f._attributes['witch'], b'floats')
        f.close()
Example 20
def doHeatContent(thetaFile, thetaVar, rhoFile, rhoVar, nodata, thresholdDepth, outFile, outFormat, options):
    # open file
    fhTheta = netcdf.netcdf_file(thetaFile, 'r')
    if fhTheta is None:
        exitMessage("Could not open file {0}. Exit 2.".format(thetaFile), 2)

    fhRho = netcdf.netcdf_file(rhoFile, 'r')
    if fhRho is None:
        exitMessage("Could not open file {0}; Exit(2).".format(rhoFile), 2)

    thetao = fhTheta.variables[thetaVar][:] # [time, levels, lat, lon]
    rho = fhRho.variables[rhoVar][:]
    levelsTmp = fhTheta.variables['lev_bnds'][:]
    levels = numpy.ravel(0.5*(levelsTmp[:,0] + levelsTmp[:,1] ))
    # mapHeat : time, lat, lon
    mapHeat = numpy.zeros( (thetao.shape[0], thetao.shape[2], thetao.shape[3]) ) - 1
    
    # Compressing loops...
    timelatlon=[]
    for itime in range(thetao.shape[0]):
        for ilat in range(thetao.shape[2]):
            for ilon in  range(thetao.shape[3]):
                if thetao[itime, 0, ilat, ilon] < nodata:
                    timelatlon.append((itime, ilat, ilon))

    # loop over time, lat and lon
    counter=0
    for ill in timelatlon:
        profileTheta = thetao[ ill[0], : ,ill[1], ill[2] ].ravel()
        profileRho = rho[ ill[0], : ,ill[1], ill[2] ].ravel()
        heat = 0
        heat = computeHeatPotential( profileTheta, profileRho, levels, thresholdDepth, nodata )
        mapHeat[ ill[0], ill[1], ill[2] ] = heat
        gdal.TermProgress_nocb( counter/float(len(timelatlon)) )
        counter = counter+1
 
    gdal.TermProgress_nocb(1)

    # save result
    outDrv = gdal.GetDriverByName(outFormat)
    outDS = outDrv.Create(outFile, mapHeat.shape[2], mapHeat.shape[1], mapHeat.shape[0], GDT_Float32, options)
    outDS.SetProjection(latlon())

    for itime in range(thetao.shape[0]):
        data = numpy.ravel(mapHeat[itime, :, :])
        outDS.GetRasterBand(itime+1).WriteArray( numpy.flipud( data.reshape((mapHeat.shape[1], mapHeat.shape[2])) ) )
        gdal.TermProgress_nocb( itime/float(thetao.shape[0]) )
    gdal.TermProgress_nocb(1)

    outDS = None
Example 21
def create_ave_pp_header(datestr):
    '''
    Generates output files, in PROFILES directory
    Files refer to a time frame and will contain all the variables
    Works in append mode, if the file name exists.'''
    
    ave_Pprofiles = OUTPUT_DIR_PRO + IOnames.Output.prefix + datestr + ".profiles.nc"
    if os.path.exists(ave_Pprofiles):
        ncOUT_Pprofiles =  NC.netcdf_file(ave_Pprofiles,"a")
    else:
        ncOUT_Pprofiles = NC.netcdf_file(ave_Pprofiles,"w")
        ncOUT_Pprofiles.createDimension("Ncruise"   ,nCruise)
        ncOUT_Pprofiles.createDimension("z"         ,jpk)
        setattr(ncOUT_Pprofiles,"CruiseIndex",CruiseDescr)
    return ncOUT_Pprofiles
Example 22
def test_byte_gatts():
    # Check that global "string" atts work like they did before py3k
    # unicode and general bytes confusion
    filename = pjoin(TEST_DATA_PATH, 'g_byte_atts.nc')
    f = netcdf_file(filename, 'w')
    f._attributes['holy'] = b'grail'
    f._attributes['witch'] = 'floats'
    f.close()

    f = netcdf_file(filename, 'r')
    assert_equal(f._attributes['holy'], b'grail')
    assert_equal(f._attributes['witch'], b'floats')
    f.close()

    os.remove(filename)
Example 23
def size_check(datadir):
	"""Determine how large the merged arrays will be, for preallocation.
	"""
	files = glob.glob(datadir + '*.nc')
	numFiles = len(files)
	ncid = netcdf.netcdf_file(files[0])
	nele = ncid.dimensions['nele']
	node = ncid.dimensions['node']
	ncid.close()
	timeDim = 0
	for i in range(numFiles):
		ncid = netcdf.netcdf_file(files[i])
		timeDim += len(ncid.variables['time'].data)
		ncid.close()
	return nele, node, timeDim
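A hypothetical preallocation step using the sizes reported above (directory name and array names are placeholders):

nele, node, timeDim = size_check('run_output/')
time_all = numpy.zeros(timeDim)
zeta_all = numpy.zeros((timeDim, node))  # e.g. a node-based variable merged over files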
Example 24
def data(request):
    # initial condition
    SO2_g_init  = 200e-12 
    O3_g_init   = 50e-9
    H2O2_g_init = 500e-12
    outfreq     = 1000    
    z_max       = 250.
    dt          = .01
    w           = 1.

    # turn on chemistry
    chem_dsl = True
    chem_dsc = True
    chem_rct = True
    chem_spn = 10

    # define output for moments and chemistry
    out_bin_chem = '{"radii": {"rght": 1, "moms": [0, 1, 3], "drwt": "wet", "nbin": 1, "lnli": "lin", "left": 0}, "chem": {"rght": 1, "moms": ["O3_a", "H2O2_a", "SO2_a", "H", "OH", "HSO3_a", "SO3_a", "HSO4_a", "SO4_a", "S_VI"], "drwt": "wet", "nbin": 1, "lnli": "lin", "left": 0}, "radiidry": {"rght": 1, "moms": [0, 1, 3], "drwt": "dry", "nbin": 1, "lnli": "lin", "left": 0}}'
    out_bin      = '{"radii": {"rght": 1, "moms": [0, 1, 3], "drwt": "wet", "nbin": 1, "lnli": "lin", "left": 0}, "radiidry": {"rght": 1, "moms": [0, 1, 3], "drwt": "dry", "nbin": 1, "lnli": "lin", "left": 0}}'

    # running parcel model for open / closed chem system  ...
    parcel(dt = dt, z_max = z_max, w = w, outfreq = outfreq,\
            SO2_g_0 = SO2_g_init, O3_g_0 = O3_g_init, H2O2_g_0 = H2O2_g_init,\
            chem_sys = 'open',   outfile="test_chem_open.nc",\
            chem_dsl = chem_dsl, chem_dsc = chem_dsc, chem_rct = chem_rct, chem_spn = chem_spn, \
            out_bin = out_bin_chem)
    parcel(dt = dt, z_max = z_max, w = w, outfreq = outfreq,\
             SO2_g_0 = SO2_g_init, O3_g_0 = O3_g_init, H2O2_g_0 = H2O2_g_init,\
             chem_sys = 'closed', outfile="test_chem_closed.nc",\
             chem_dsl = chem_dsl, chem_dsc = chem_dsc, chem_rct = chem_rct, chem_spn = chem_spn, \
             out_bin = out_bin_chem)
    parcel(dt = dt, z_max = z_max, w = w, outfreq = outfreq, SO2_g_0=0, O3_g_0=0, H2O2_g_0=0,\
             outfile="test_chem_off.nc", out_bin = out_bin)

    # TODO - why do I have to repeat this import here?
    from scipy.io import netcdf

    data = {'open'   : netcdf.netcdf_file("test_chem_open.nc", "r"),\
            'closed' : netcdf.netcdf_file("test_chem_closed.nc", "r"),\
            'off'    : netcdf.netcdf_file("test_chem_off.nc", "r")}

    def removing_files():
        for name, ncf in data.items():
            subprocess.call(["rm", "test_chem_" + name + ".nc"])

    request.addfinalizer(removing_files)

    return data
Example 25
def data(request):
    """
    Run parcel simulation and return opened netcdf file

    """
    # copy options from chem_conditions
    p_dict = copy.deepcopy(parcel_dict)

    # modify options from chem_conditions
    p_dict['outfreq']  = p_dict['z_max'] / p_dict['w'] / p_dict['dt'] / 4
    p_dict['outfile']  = "test_mass.nc"
    p_dict['chem_dsl'] = True

    p_dict['out_bin'] = \
           '{"wradii": {"rght": 1e-4, "left": 1e-10, "drwt": "wet", "lnli": "lin", "nbin": 500, "moms": [0, 3]}, \
             "dradii": {"rght": 1e-4, "left": 1e-10, "drwt": "dry", "lnli": "lin", "nbin": 500, "moms": [0, 3]}}'

    # run parcel model
    parcel(**p_dict)

    data = netcdf.netcdf_file(p_dict['outfile'], "r")

    # remove all netcdf files after all tests
    def removing_files():
        subprocess.call(["rm", p_dict['outfile']])

    request.addfinalizer(removing_files)
    return data
Example 26
def saveCurrentToFile(
        filename,
        I,
        variables,
        potentials
        ):
    f = netcdf.netcdf_file(filename, 'w')
    sh = I.shape
    n = f.createDimension('n_potentials', sh[0])
    xd = f.createDimension('x', sh[1])
    yd = f.createDimension('y', sh[2])
    zd = f.createDimension('z', sh[3])
    vector = f.createDimension('vector', 3)
    posd = f.createVariable('position', dimensions=('vector', 'x', 'y', 'z'), type=np.dtype('f'))
    Id = f.createVariable('I', dimensions=('n_potentials', 'x', 'y', 'z'), type=np.dtype('f'))
    bias = f.createVariable('bias', dimensions=('n_potentials', ), type=np.dtype('f'))
    temperature = f.createVariable('temperature', dimensions=('n_potentials', ), type=np.dtype('f'))
    broadening = f.createVariable('broadening', dimensions=('n_potentials', ), type=np.dtype('f'))

    for i, potential in enumerate(potentials):
        bias[i] = potential['voltage']
        temperature[i] = potential['temperature']
        broadening[i] = potential['state_broadening']


    Id[:,:,:,:] = I
    posd[:,:,:,:] = variables
    f.close()
Example 27
def read_netcdf(nc_file, reshape=False, just_grid=False):
    """
    Reads the NetCDF file *nc_file*, and writes it to the fields of a new
    RasterModelGrid, which it then returns.
    Check the names of the fields in the returned grid with
    grid.at_nodes.keys().
    """
    try:
        root = nc.netcdf_file(nc_file, 'r', version=2)
    except TypeError:
        root = nc4.Dataset(nc_file, 'r', format='NETCDF4')

    shape = _read_netcdf_grid_shape(root)
    spacing = _read_netcdf_grid_spacing(root)

    assert(len(shape) == 2)
    assert(len(spacing) == 2)
    if spacing[0] != spacing[1]:
        raise NotRasterGridError()

    grid = RasterModelGrid(num_rows=shape[0], num_cols=shape[1], dx=spacing[0])

    if not just_grid:
        fields = _read_netcdf_structured_data(root)
        for (name, values) in fields.items():
            grid.add_field('node', name, values)

    root.close()

    return grid
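A minimal call sketch; the file name is hypothetical, and the exact field-dictionary attribute name may differ between landlab versions:

grid = read_netcdf('elevation.nc')                        # builds a RasterModelGrid from the file
grid_only = read_netcdf('elevation.nc', just_grid=True)   # skip reading node fields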
Example 28
def createSampleXRange(M,N,filename,bounds=None,xdim=None,ydim=None):
    if xdim is None:
        xdim = 1.0
    if ydim is None:
        ydim = 1.0
    if bounds is None:
        xmin = 0.5
        xmax = xmin + (N-1)*xdim
        ymin = 0.5
        ymax = ymin + (M-1)*ydim
    else:
        xmin,xmax,ymin,ymax = bounds
    data = np.arange(0,M*N).reshape(M,N).astype(np.int32)
    cdf = netcdf.netcdf_file(filename,'w')
    cdf.createDimension('side',2)
    cdf.createDimension('xysize',M*N)
    dim = cdf.createVariable('dimension','i',('side',))
    dim[:] = np.array([N,M])
    spacing = cdf.createVariable('spacing','i',('side',))
    spacing[:] = np.array([xdim,ydim])
    zrange = cdf.createVariable('z_range',INVERSE_NETCDF_TYPES[str(data.dtype)],('side',))
    zrange[:] = np.array([data.min(),data.max()])
    x_range = cdf.createVariable('x_range','d',('side',))
    x_range[:] = np.array([xmin,xmax])
    y_range = cdf.createVariable('y_range','d',('side',))
    y_range[:] = np.array([ymin,ymax])
    z = cdf.createVariable('z',INVERSE_NETCDF_TYPES[str(data.dtype)],('xysize',))
    z[:] = data.flatten()
    cdf.close()
    return data
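A hedged example call; it assumes the module-level INVERSE_NETCDF_TYPES mapping used above is available, and the output file name is arbitrary:

data = createSampleXRange(10, 20, 'sample_xrange.grd')
print(data.shape)   # (10, 20), values 0..199 written in GMT x_range/y_range style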
Example 29
    def read_raw(self, var):
        '''
        If profile is None the entire 2d array (depth,time) is returned
        If a profile object is provided, a 1d array (dimension depth) is returned
        corresponding to the pointprofile provided
        '''
        if not self.__file_already_read :
            ncIN = NC.netcdf_file(self.filename,'r')
            self._timesInFile = ncIN.variables['TIME'].data.copy()
            self._VAR         = ncIN.variables[var   ].data.copy()
            self._QC         = ncIN.variables[var + "_QC"  ].data.copy()
            if 'PRES' in ncIN.variables:
                self._PRES        = ncIN.variables['PRES'].data.copy()
                pres_qc           = ncIN.variables['PRES_QC'].data.copy()
            else:
                self._PRES        = ncIN.variables['DEPH'].data.copy()
                pres_qc           = ncIN.variables['DEPH_QC'].data.copy()

            self._TEMP = ncIN.variables['TEMP'].data.copy()
            self._PSAL = ncIN.variables['PSAL'].data.copy()

            temp_qc = ncIN.variables['TEMP_QC'].data.copy()
            psal_qc = ncIN.variables['PSAL_QC'].data.copy()
            ncIN.close()
            self._good_data = (temp_qc == 1 ) & (psal_qc == 1 ) & (pres_qc == 1 ) & (self._QC == 1)
            self.__file_already_read = True

            tconv = T90conv(self._TEMP)
            self._RHO = seawater.dens(self._PSAL,tconv, self._PRES)
        return self._VAR, self._PRES, self._QC, self._timesInFile
Example 30
def test_read_withMissingValue():
    # For a variable with missing_value but not _FillValue, the missing_value
    # should be used
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var4_missingValue'][:]
        assert_mask_matches(vardata, [False, True, False])
Example 31
    var.update(tmp_result)
    result = var
    status = pyferret.stop()
    if verbose:
        if status:
            print("PyFerret stopped.")
        else:
            print("PyFerret failed to stop.")
    return result


regrid_primitive = run_worker(regrid_once_primitive)

if __name__ == '__main__':
    import scipy.io.netcdf as netcdf
    ncfile_low = netcdf.netcdf_file("land_mask_lowres.nc")
    newvar = dict(data=ncfile_low.variables['land_mask'].data,
                  coords=[ncfile_low.variables[dim].data
                          for dim in ncfile_low.variables['land_mask'].\
                          dimensions],
                  dimunits=[ncfile_low.variables[dim].units
                            for dim in ncfile_low.variables['land_mask'].\
                            dimensions])
    ncfile_high = netcdf.netcdf_file("land_mask_highres.nc")
    var_high = dict(data=ncfile_high.variables['land_mask'].data,
                    coords=[ncfile_high.variables[dim].data
                            for dim in ncfile_high.variables['land_mask'].\
                            dimensions],
                    dimunits=[ncfile_high.variables[dim].units
                              for dim in ncfile_high.variables['land_mask'].\
                              dimensions])
Example 32
    def produce(self, data):
        base_url = data["http"]["url"]
        pad = data["http"]["padProfileID"]
        fd, temp_nc = mkstemp()
        os.close(fd)
        try:
            # Iterate over the floats
            for floats in data["floats"]:
                # Handle null profile number for this float - just get the last profile
                if data["floats"][floats] is None:
                    url = pystache.render(base_url, {
                        "floatID": floats
                    }).rsplit("/", 1)[0]
                    idx = requests.get(url)
                    if idx.status_code == 404:
                        raise Exception(
                            "Internet",
                            "Could not load FTP index {0}".format(url))
                    data["floats"][floats] = \
                        int(idx.content.rsplit("<a href=", 1)[1].
                            split("_")[1].split(".")[0]) - 1
                # Iterate over the profiles for this float
                profile_found = True
                while profile_found:
                    sequence_no = data["floats"][floats] + 1
                    profile_found = False
                    profile_quality = None
                    for quality in ["D", "R"]:
                        url = self.format_url(base_url, floats, sequence_no,
                                              quality, pad)
                        with open(temp_nc, 'wb') as handle:
                            r = requests.get(url, stream=True)
                            if r.status_code == 404:
                                continue
                            if not r.ok:
                                logging.info("Problem with %s", url)
                                continue
                            profile_found = True
                            profile_quality = quality
                            for block in r.iter_content(1024):
                                handle.write(block)
                            break

                    if profile_found:
                        try:
                            last_modified = datetime.strftime(
                                datetime.strptime(r.headers["Last-Modified"],
                                                  "%a, %d %b %Y %H:%M:%S %Z"),
                                "%Y-%m-%dT%H:%M:%SZ")
                            f = netcdf.netcdf_file(temp_nc, "rw")
                            # Add some attributes we want to have when we get to Erddap
                            setattr(f, "FLOAT_WMO_ID", str(floats))
                            setattr(f, "FLOAT_PROFILE_SEQUENCE_NUMBER",
                                    str(sequence_no))
                            setattr(f, "SOURCE_URL", url)
                            setattr(f, "QUALITY", quality)
                            setattr(f, "LAST_MODIFIED_DATETIME", last_modified)
                            f.close()
                            logging.info("%s %s %s %s %s", f.FLOAT_WMO_ID,
                                         f.FLOAT_PROFILE_SEQUENCE_NUMBER,
                                         f.SOURCE_URL,
                                         f.LAST_MODIFIED_DATETIME, f.QUALITY)
                            with open(temp_nc, "rb") as nc_file:
                                nc_encoded = base64.b64encode(nc_file.read())
                            result = {
                                "source_url": url,
                                "last_modified": last_modified,
                                "netcdf": nc_encoded,
                                "float": floats,
                                "quality": profile_quality,
                                "sequence_no": sequence_no
                            }

                            data["floats"][floats] += 1
                            yield (data, result)
                        except AssertionError:
                            pass
                    for i in range(5 * 10):
                        time.sleep(0.1)

        finally:
            try:
                os.remove(temp_nc)
            except OSError:
                pass
Example 33
def extract_nc3_by_name(filename, dsname):
    nc_data = netcdf.netcdf_file(filename, "r")
    ds = np.array(nc_data.variables[dsname][:])
    nc_data.close()
    return ds
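Example call with hypothetical file and variable names:

temperature = extract_nc3_by_name('ocean_state.nc', 'thetao')
print(temperature.shape)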
Example 34
import numpy as np
from scipy.io import netcdf
f_path = 'precip.nc'  #your file from the NCEP reanalysis plotter

# Retrieve data from NetCDF file
with netcdf.netcdf_file(f_path, 'r') as f:
    lon = f.variables['lon'][::]  # copy as list
    lat = f.variables[
        'lat'][::-1]  # invert the latitude vector -> South to North
    air = f.variables['prate'][0, ::-1, :]

# Shift 'lon' from [0,360] to [-180,180], make numpy array
tmp_lon = np.array([
    lon[n] - 360 if l >= 180 else lon[n] for n, l in enumerate(lon)
])  # => [0,180]U[-180,2.5]

i_east, = np.where(tmp_lon >= 0)  # indices of east lon
i_west, = np.where(tmp_lon < 0)  # indices of west lon
lon = np.hstack((tmp_lon[i_west], tmp_lon[i_east]))  # stack the 2 halves

# Correspondingly, shift the 'air' array
tmp_air = np.array(air)
air = np.hstack((tmp_air[:, i_west], tmp_air[:, i_east]))
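A possible quick-look plot of the shifted field (assumes matplotlib; variable and label names follow the snippet above):

import matplotlib.pyplot as plt

plt.pcolormesh(lon, lat, air, shading='auto')
plt.colorbar(label='prate')
plt.xlabel('longitude')
plt.ylabel('latitude')
plt.show()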
Example 35
    def getNetCDFHeader(cls, filename):
        """Get the header information from a GMT NetCDF3 file.
        :param filename:
           File name of GMT NetCDF3 grid
        :returns:
           - GeoDict specifying spatial extent, resolution, and shape of grid inside NetCDF file.
           - xvar array specifying X coordinates of data columns
           - yvar array specifying Y coordinates of data rows
        """
        cdf = netcdf.netcdf_file(filename)
        geodict = {}
        xvarname = None
        registration = 'gridline'
        if hasattr(cdf, 'node_offset') and getattr(cdf, 'node_offset') == 1:
            registration = 'pixel'
        if 'x' in cdf.variables.keys():
            xvarname = 'x'
            yvarname = 'y'
        elif 'lon' in cdf.variables.keys():
            xvarname = 'lon'
            yvarname = 'lat'
        if xvarname is not None:
            xvar = cdf.variables[xvarname].data.copy()

            #GMT grids can have longitudes from 0-360, we're correcting for that here
            #however, sometimes GMT (or at least netcdf) allows an array for longitude centers
            #where the max value exceeds +180.0.  Here we're also trapping for that situation,
            #and hopefully not catastrophically breaking a bunch of other stuff.
            if (xvar > 180.01).any():
                xvar = xvar - 360

            yvar = cdf.variables[yvarname].data.copy()
            geodict['nx'] = len(xvar)
            geodict['ny'] = len(yvar)
            geodict['xmin'] = xvar.min()
            geodict['xmax'] = xvar.max()
            geodict['ymin'] = yvar.min()
            geodict['ymax'] = yvar.max()
            newx = np.linspace(geodict['xmin'],
                               geodict['xmax'],
                               num=geodict['nx'])
            newy = np.linspace(geodict['ymin'],
                               geodict['ymax'],
                               num=geodict['ny'])
            geodict['dx'] = newx[1] - newx[0]
            geodict['dy'] = newy[1] - newy[0]
        elif 'x_range' in cdf.variables.keys():
            geodict['xmin'] = cdf.variables['x_range'].data[0]
            geodict['xmax'] = cdf.variables['x_range'].data[1]
            geodict['ymin'] = cdf.variables['y_range'].data[0]
            geodict['ymax'] = cdf.variables['y_range'].data[1]
            geodict['nx'], geodict['ny'] = cdf.variables['dimension'].data
            #geodict['dx'],geodict['dy'] = cdf.variables['spacing'].data
            xvar = np.linspace(geodict['xmin'],
                               geodict['xmax'],
                               num=geodict['nx'])
            yvar = np.linspace(geodict['ymin'],
                               geodict['ymax'],
                               num=geodict['ny'])
            geodict['dx'] = xvar[1] - xvar[0]
            geodict['dy'] = yvar[1] - yvar[0]
        else:
            raise DataSetException(
                'No support for CDF data file with variables: %s' %
                str(cdf.variables.keys()))

        #We are going to represent all grids internally as grid-line registered
        #The difference between pixel and gridline-registered grids is depicted well here:
        #http://gmt.soest.hawaii.edu/doc/5.1.0/GMT_Docs.html#grid-registration-the-r-option
        if registration == 'pixel':
            geodict['xmin'] += geodict['dx'] / 2.0
            geodict['xmax'] -= geodict['dx'] / 2.0
            geodict['ymin'] += geodict['dy'] / 2.0
            geodict['ymax'] -= geodict['dy'] / 2.0
        #because dx/dy are not explicitly defined in netcdf headers, here we'll assume
        #that those values are adjustable, and we'll preserve the shape and extent.
        gd = GeoDict(geodict, adjust='res')
        return (gd, xvar, yvar)
Example 36
    def write_coordinates(self, atoms, filename=''):
        """ write amber coordinates in netCDF format,
            only rectangular unit cells are allowed"""
        if filename == '':
            filename = self.incoordfile
        fout = netcdf.netcdf_file(filename, 'w')
        # dimension
        fout.Conventions = 'AMBERRESTART'
        fout.ConventionVersion = "1.0"
        fout.title = 'Ase-generated-amber-restart-file'
        fout.application = "AMBER"
        fout.program = "ASE"
        fout.programVersion = "1.0"
        fout.createDimension('cell_spatial', 3)
        fout.createDimension('label', 5)
        fout.createDimension('cell_angular', 3)
        fout.createDimension('time', 1)
        time = fout.createVariable('time', 'd', ('time', ))
        time.units = 'picosecond'
        time[0] = 0
        fout.createDimension('spatial', 3)
        spatial = fout.createVariable('spatial', 'c', ('spatial', ))
        spatial[:] = np.asarray(list('xyz'))
        # spatial = 'xyz'

        natom = len(atoms)
        fout.createDimension('atom', natom)
        coordinates = fout.createVariable('coordinates', 'd',
                                          ('atom', 'spatial'))
        coordinates.units = 'angstrom'
        coordinates[:] = atoms.get_positions()[:]

        if atoms.get_velocities() is not None:
            velocities = fout.createVariable('velocities', 'd',
                                             ('atom', 'spatial'))
            velocities.units = 'angstrom/picosecond'
            velocities[:] = atoms.get_velocities()[:]

        # title
        cell_angular = fout.createVariable('cell_angular', 'c',
                                           ('cell_angular', 'label'))
        cell_angular[0] = np.asarray(list('alpha'))
        cell_angular[1] = np.asarray(list('beta '))
        cell_angular[2] = np.asarray(list('gamma'))

        # title
        cell_spatial = fout.createVariable('cell_spatial', 'c',
                                           ('cell_spatial', ))
        cell_spatial[0], cell_spatial[1], cell_spatial[2] = 'a', 'b', 'c'

        # data
        cell_lengths = fout.createVariable('cell_lengths', 'd',
                                           ('cell_spatial', ))
        cell_lengths.units = 'angstrom'
        cell_lengths[0] = atoms.get_cell()[0, 0]
        cell_lengths[1] = atoms.get_cell()[1, 1]
        cell_lengths[2] = atoms.get_cell()[2, 2]

        cell_angles = fout.createVariable('cell_angles', 'd',
                                          ('cell_angular', ))
        box_alpha, box_beta, box_gamma = 90.0, 90.0, 90.0
        cell_angles[0] = box_alpha
        cell_angles[1] = box_beta
        cell_angles[2] = box_gamma

        cell_angles.units = 'degree'
        fout.close()
Example 37
diag = 'atmos_daily'
diago = 'eflx2d'
npert = np.size(pert)
var1_name = 'vcomp'  #if adjust, vcomp must be var1
var2_name = 'temp'
varo_name = 'vTtr_zm'
adj = True  #T if vcomp, F if omega (need different adjustment)
sind = range(npert)
init = True
init3d = False
pind = 0
for si in sind:
    atmdir = indir + exper + pert[si] + plat + reg + '/history/'
    stafile = atmdir + '00000.atmos_average.nc'
    fs = []
    fs.append(nc.netcdf_file(stafile, 'r', mmap=True))
    bk = fs[-1].variables['bk'][:].astype(np.float64)
    pk = fs[-1].variables['pk'][:].astype(np.float64)
    lat = fs[-1].variables['lat'][:].astype(np.float64)
    lon = fs[-1].variables['lon'][:].astype(np.float64)
    pfull = fs[-1].variables['pfull'][pind:].astype(np.float64)
    #zsurf = fs[-1].variables['zsurf'][:].astype(np.float64)
    fs[-1].close()
    nlat = np.size(lat)
    nlon = np.size(lon)
    nlev = np.size(pfull)
    #%%
    filename = atmdir + '00000.atmos_daily.nc'
    fs.append(nc.netcdf_file(filename, 'r', mmap=True))
    vcomp = fs[-1].variables['vcomp'][500:, pind:, :, :].astype(
        np.float64)  #t,p,lat,lon
Example 38
def test_read_withFillValNaN():
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var5_fillvalNaN'][:]
        assert_mask_matches(vardata, [False, True, False])
    def read_E_out(self, ref_file, rec_file):
        """read data from the output file, produce the received E signal,
        return the complex E
        """
        #print 'reading file:',file
        f = nc.netcdf_file(ref_file, 'r')
        #print 'finish reading.'

        if (self.dimension == 2):
            y = np.copy(f.variables['a_y'].data)
            z = np.copy(f.variables['a_z'].data)
            z_idx = f.dimensions['a_nz'] // 2 - 1

            # FWR2D output doesn't include the central ray WKB phase advance in
            # paraxial region. The complete result needs to take into account
            # this additional phase
            ephi_wkb = f.variables['p_phaser'][...] +\
                       1j * f.variables['p_phasei'][...]
            D_ephi = ephi_wkb[0] / ephi_wkb[-1]
            E_ref = np.copy(f.variables['a_Er'][1,z_idx,:] + \
                            1j*f.variables['a_Ei'][1,z_idx,:])*D_ephi**2
            f.close()

            receiver = c5.C5_reader(rec_file)

            E_rec = receiver.E_re_interp(z,
                                         y) + 1j * receiver.E_im_interp(z, y)

            E_out = np.trapz(E_ref * np.conj(E_rec[z_idx, :]), x=y)
            return E_out

        elif (self.dimension == 3):
            y = np.copy(f.variables['a_y'][:])
            z = np.copy(f.variables['a_z'][:])

            E_ref_re_interp = interp2d(y,
                                       z,
                                       f.variables['a_Er'][1, :, :],
                                       kind='cubic',
                                       fill_value=0)
            E_ref_im_interp = interp2d(y,
                                       z,
                                       f.variables['a_Ei'][1, :, :],
                                       kind='cubic',
                                       fill_value=0)
            f.close()

            receiver = c5.C5_reader(rec_file)

            #use area average to estimate E_ref*np.conj(E_receive) integrated over y,z dimension.
            ymin = np.max([y[0], receiver.X1D[0]])
            ymax = np.min([y[-1], receiver.X1D[-1]])
            zmin = np.max([z[0], receiver.Y1D[0]])
            zmax = np.min([z[-1], receiver.Y1D[-1]])
            y_fine = np.linspace(ymin, ymax, 200)
            z_fine = np.linspace(zmin, zmax, 200)
            self.E_ref = E_ref_re_interp(y_fine, z_fine)+ \
                         1j*E_ref_im_interp(y_fine, z_fine)
            self.E_rec = receiver.E_re_interp(z_fine,y_fine)+ \
                         1j*receiver.E_im_interp(z_fine,y_fine)

            E_conv = self.E_ref * np.conj(self.E_rec)

            dy = y_fine[1] - y_fine[0]
            dz = z_fine[1] - z_fine[0]

            # integration over y direction
            SEy = np.sum(E_conv[:, :-1] + E_conv[:, 1:], axis=1) / 2 * dy
            # integration over z direction
            E_out = np.sum(SEy[:-1] + SEy[1:]) / 2 * dz
            return E_out
Example 40
def test_read_withValuesNearFillValue():
    # Regression test for ticket #5626
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var1_fillval0'][:]
        assert_mask_matches(vardata, [False, True, False])
Example 41
 def doit():
     with netcdf_file(filename, mmap=True) as f:
         return f.variables['lat'][:]
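Note on the snippet above: with mmap=True the returned slice still references the memory-mapped file, so using it after the with block has closed the file is unsafe (scipy warns about lingering references to mmapped data). A safer variant copies the data while the file is still open:

def doit():
    with netcdf_file(filename, mmap=True) as f:
        return f.variables['lat'][:].copy()  # detach from the mmap before the file closes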
Example 42
]

fig, axarr = plt.subplots(3, 1, sharex='col', figsize=(8, 16))

for i, ncfile in enumerate(filenames1):

    a = 6400000.0 * afac  #/20.0
    g = 9.8
    kappa = 0.286
    M_air = 28.98
    k = 1.38E-23
    R = 8.3144621
    p0 = 100000 * pfac
    R_spec = R / M_air * 1000  # 1000g per kg
    cp = 1003
    f = netcdf.netcdf_file(ncfile, 'r', mmap=False)

    time = f.variables['time']
    lat = f.variables['lat']
    lon = f.variables['lon']
    lev = f.variables['lev']  #* pfac
    t = f.variables['ta']
    u = f.variables['ua']
    v = f.variables['va']
    o = f.variables['wap']
    lev = lev[:]  #* pfac
    p = lev[:]
    print(lev[:])

    nlat = lat.shape[0]
    nlon = lon.shape[0]
def main(infile,
         outdir,
         fname_fmt='%Y%m%d.tif',
         params={},
         unscale=False,
         skip_existing=False,
         progress=None):
    """Extract all time slices from netCDF file naming them after date

    Parameters
    ----------
    infile : str
        path to netCDF file
    outdir : str
        output directory
    fname_fmt : str
        format string for datetime.datetime.strftime()
        that generates the target file name
        e.g. '%Y%m%d.tif' will give '20160101.tif'
    params : dict
        parameters passed to
        processing.runalg('gdalogr:translate')
    unscale : bool
        apply GDAL's -unscale option
        and also fix the nodata value
    skip_existing : bool
        skip existing files
        if False, existing files will be overwritten
    progress : QGIS progress, optional
        if run in QGIS
    """
    # set common parameters
    params = params.copy()
    params['INPUT'] = infile
    params['EXPAND'] = 0
    _common_extra = params.get('EXTRA', '')

    # get time data
    with netcdf.netcdf_file(infile, 'r') as ds:
        timevar = ds.variables['time']
        timedata = netcdf_utils.num2date(timevar.data, timevar)

        # set extent
        lonlim = _get_minmax(ds.variables['lon'].data)
        latlim = _get_minmax(ds.variables['lat'].data)
        params['PROJWIN'] = '{0[0]},{0[1]},{1[0]},{1[1]}'.format(
            lonlim, latlim)

        # unscale
        if unscale:
            if '-unscale' not in _common_extra:
                _common_extra += ' -unscale'
            # fixing nodata value (gdal_translate deficiency)
            new_nodata = _get_scaled_nodata(ds, progress)
            if new_nodata is not None:
                _common_extra += ' -a_nodata {}'.format(new_nodata)

    # export time slices to tif files
    for i, date in enumerate(timedata):
        # set specific parameters
        fname = date.strftime(fname_fmt)
        params['OUTPUT'] = os.path.join(outdir, fname)
        params['EXTRA'] = (_common_extra + ' -b {}'.format(i + 1)).strip()

        # skip existing
        if skip_existing and os.path.exists(params['OUTPUT']):
            continue

        # run gdal_translate with parameters
        processing.runalg("gdalogr:translate", params)

        if not os.path.exists(params['OUTPUT']):
            raise ValueError('Algorithm failed. '
                             'No output file was created. '
                             'Parameters to gdalogr:translate were:\n'
                             '{}'.format(params.__repr__()))

        # log
        if progress is not None:
            progress.setConsoleInfo("Time slice {} to file {}".format(
                date, fname))
Example 44
 for v in range(0, nvar):
     nsteps = 0
     for y in range(ystart, yend + 1):
         yst = str(10000 + y)[1:5]
         for m in range(0, 12):
             mst = str(101 + m)[1:3]
             #myfile = os.path.abspath(mydir+'/'+mycases[c]+'_'+mysites[c]+'_'+mycompsets[c]+ \
             #                         ".clm2."+hst+"."+yst+"-"+mst+".nc")
             #myfile = os.path.abspath(mydir+'/'+runnames[c]+".clm2."+hst+"."+yst+"-"+mst+".nc")
             myfile = os.path.abspath(mydir + '/' + runnames[c] + "." +
                                      options.model_name + "." + hst +
                                      "." + yst + "-" + mst + ".nc")
             #get units/long names from first file
             if (os.path.exists(myfile)):
                 if (y == ystart and m == 0 and c == 0):
                     nffile = netcdf.netcdf_file(myfile, "r")
                     varout = nffile.variables[myvars[v]]
                     var_long_names.append(
                         varout.long_name.decode('utf_8'))
                     nffile.close()
                     if (float(options.scale_factor) < -900):
                         if ('gC/m^2/s' in varout.units):
                             myscalefactors.append(3600 * 24)
                             var_units.append('g.C/m2/day')
                         else:
                             myscalefactors.append(1.0)
                             var_units.append(
                                 varout.units.decode('utf_8').replace(
                                     '^', ''))
                     else:
                         myscalefactors.append(
Example 45
import numpy as np
from scipy.io import netcdf
import matplotlib.pyplot as plt
import math

# set general text options for plotting
plt.rc('text'           , usetex=True)
plt.rc('font'           , size=12)
plt.rc('legend'         , fontsize=12)
plt.rc('text.latex'     , preamble=r'\usepackage{cmbright}')

#

almost_black            = '#262626'


abrupt2xCO2_prp = netcdf.netcdf_file('../Data/mpiesm-1.2.prp_abrupt2xCO2_echam6_prp_mean_anomalies.nc')
abrupt2xCO2_bot = netcdf.netcdf_file('../Data/mpiesm-1.2.prp_abrupt2xCO2_echam6_BOT_mean_anomalies.nc')
abrupt4xCO2_prp = netcdf.netcdf_file('../Data/mpiesm-1.2.prp_abrupt4xCO2_echam6_prp_mean_anomalies.nc')
abrupt4xCO2_bot = netcdf.netcdf_file('../Data/mpiesm-1.2.prp_abrupt4xCO2_echam6_BOT_mean_anomalies.nc')
abrupt8xCO2_prp = netcdf.netcdf_file('../Data/mpiesm-1.2.prp_abrupt8xCO2_echam6_prp_mean_anomalies.nc')
abrupt8xCO2_bot = netcdf.netcdf_file('../Data/mpiesm-1.2.prp_abrupt8xCO2_echam6_BOT_mean_anomalies.nc')
abrupt16xCO2_prp = netcdf.netcdf_file('../Data/mpiesm-1.2.prp_abrupt16xCO2_echam6_prp_mean_anomalies.nc')
abrupt16xCO2_bot = netcdf.netcdf_file('../Data/mpiesm-1.2.prp_abrupt16xCO2_echam6_BOT_mean_anomalies.nc')

amip4xCO2 = netcdf.netcdf_file('../Data/BOT_echam-6.3.02p4_amip4xCO2_1979-2008_timmean_fldmean_anomaly.nc')

# ---------------------------------------------------

fig, axes = plt.subplots(1,1, figsize=(5,7))

id=np.arange(0,299)
Esempio n. 46
0
from scipy.io import netcdf
import matplotlib.pyplot as plt
import numpy as np
import math
import sys

if len(sys.argv) != 2:
    print("Error! Wrong number of arguments")
    exit(1)

filename = sys.argv[1]  #output file

f = netcdf.netcdf_file(filename, 'r', mmap=False)
theta = f.variables['theta_coil'][()]
zeta = f.variables['zeta_coil'][()]
nfp = f.variables['nfp'][()]  # number of field periods?
net_poloidal_current_Amperes = f.variables['net_poloidal_current_Amperes'][()]  #??
current_potential = f.variables['current_potential'][()]
f.close()

plt.figure()
plt.contour(zeta, theta, np.transpose(current_potential[9, :, :]), 10)
plt.show()
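# A minimal sketch of the copy-before-close pattern used above. With the default mmap=True on a
# real file, indexing a variable returns a view into the memory map, so the data should be copied
# (or the file opened with mmap=False, as in the example) before close(). The file and variable
# names below are placeholders.
from scipy.io import netcdf

g = netcdf.netcdf_file('some_regcoil_output.nc', 'r')  # default: mmap=True for real files
theta_copy = g.variables['theta_coil'][()].copy()      # [()] reads everything; copy() detaches it from the mmap
g.close()                                              # safe: no live views into the closed map remain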
Esempio n. 47
0
def main():

    #----------------
    # Initializations
    #----------------
    dataDir = '/data1/ancillary_data/fl0/eol/'
    dataFileTag = 'flab'
    fileExtTag = 'cdf'
    outDataDir = '/data1/ancillary_data/fl0/eol/'
    yearstr = '2016'

    #--------------------------
    # Initialize variable lists
    #--------------------------
    tempList = []
    timeList = []
    rhList = []
    pressList = []
    tempDPList = []
    wdirList = []
    wspdList = []
    wmaxList = []
    wsdevList = []
    rmind = []

    #--------------------
    # Search through base
    # directory for files
    #--------------------
    files = glob.glob(dataDir + 'flab.' + yearstr + '*.' + fileExtTag)
    if not files:
        print('No files found for year: %s' % yearstr)
        sys.exit()
    else:
        print(' %d files found for year: %s' % (len(files), yearstr))

    #-------------------------
    # Loop through found files
    #-------------------------
    for indvfile in files:

        rmind = []  # reset the list of bad time indices for this file

        cdfname = netcdf.netcdf_file(indvfile, 'r',
                                     mmap=False)  # Open netcdf file
        # Get variables
        base_time = cdfname.variables['base_time']
        time_offset = cdfname.variables['time_offset']
        temp = cdfname.variables['tdry']
        rh = cdfname.variables['rh']
        press = cdfname.variables['pres']
        tempDP = cdfname.variables['dp']
        wdir = cdfname.variables['wdir']
        wspd = cdfname.variables['wspd']
        wmax = cdfname.variables['wmax']
        wsdev = cdfname.variables['wsdev']
        cdfname.close()

        #----------------------------------
        # Create an actual time vector from
        # basetime and offset (unix time)
        #----------------------------------
        total_time = base_time[()] + time_offset.data
        #total_time = [dt.datetime.utcfromtimestamp(indtime) for indtime in total_time]
        total_time = total_time.tolist()  # Convert numpy array to list

        #------------------------------------------------------------
        # There seems to be a timing issue in some of the data files.
        # time_offset can have unusually large numbers. Need to check
        # for this and delete observations.
        #------------------------------------------------------------
        for ind, indvtime in enumerate(total_time):
            # Identify erroneous time_offset values
            try:
                total_time[ind] = dt.datetime.utcfromtimestamp(indvtime)
            except ValueError:
                total_time[ind] = -9999
                rmind.append(ind)

        #------------------------------------------------------
        # Remove observations with erroneous time_offset values
        #------------------------------------------------------
        total_time[:] = [
            item for ind, item in enumerate(total_time) if ind not in rmind
        ]  # List
        temp.data = np.delete(temp.data, rmind)  # Numpy array
        rh.data = np.delete(rh.data, rmind)  # Numpy array
        press.data = np.delete(press.data, rmind)  # Numpy array
        tempDP.data = np.delete(tempDP.data, rmind)  # Numpy array
        wdir.data = np.delete(wdir.data, rmind)
        wspd.data = np.delete(wspd.data, rmind)
        wmax.data = np.delete(wmax.data, rmind)
        wsdev.data = np.delete(wsdev.data, rmind)  # Numpy array

        #---------------------
        # Append to main lists
        #---------------------
        timeList.extend(total_time)
        tempList.extend(temp.data)
        rhList.extend(rh.data)
        pressList.extend(press.data)
        tempDPList.extend(tempDP.data)
        wdirList.extend(wdir.data)
        wspdList.extend(wspd.data)
        wmaxList.extend(wmax.data)
        wsdevList.extend(wsdev.data)

    #------------------------
    # Sort list based on time
    # This returns a tuple
    #------------------------
    timeList, tempList, rhList, pressList, tempDPList, wdirList, wspdList, wmaxList, wsdevList = zip(
        *sorted(
            zip(timeList, tempList, rhList, pressList, tempDPList, wdirList,
                wspdList, wmaxList, wsdevList)))

    #-----------------------------------
    # Construct a vector of string years
    #-----------------------------------
    years = ["{0:02d}".format(indtime.year) for indtime in timeList]
    months = ["{0:02d}".format(indtime.month) for indtime in timeList]
    days = ["{0:02d}".format(indtime.day) for indtime in timeList]
    hours = ["{0:02d}".format(indtime.hour) for indtime in timeList]
    minutes = ["{0:02d}".format(indtime.minute) for indtime in timeList]

    #--------------------------
    # Write data to output file
    #--------------------------
    with open(outDataDir + 'fl0_met_data_' + years[0] + '_v2.txt',
              'w') as fopen:
        fopen.write(
            'Year Month Day Hour Minute Temperature[C] RelativeHumidity[%] Pressure[mbars] DewPointTemperature[C] WinDir[rN] WinSpeed[m/s] WindSpeedMax[m/s]\n'
        )
        for line in zip(years, months, days, hours, minutes, tempList, rhList,
                        pressList, tempDPList, wdirList, wspdList, wmaxList):
            fopen.write(
                '%-4s %-5s %-3s %-4s %-6s %-14.1f %-19.1f %-15.1f %-22.6f %-15.4f %-15.4f %-15.4f\n'
                % line)
Esempio n. 48
0
header = ['ncols  3600\n']
#header.append('nrows  2053\n') #Oahu
header.append('nrows  1500\n')
#header=['ncols  720\n']# % 30min
#header.append('nrows  360\n')# % 30min
#header.append('xllcorner -158.40027777733\n') #Oahu
#header.append('yllcorner 21.22999999938\n') #Oahu
header.append('xllcorner -180\n')
header.append('yllcorner -60\n')
#header.append('cellsize  0.000277777778\n')
header.append('cellsize  0.1\n')
#header.append('cellsize  0.5\n')# %30min
header.append('NODATA_value  -9999\n')

NetCDF_fn = NetCDF_fn1 + str(sim_first + 1) + '.nc'  # set input netcdf name
f = netcdf.netcdf_file(NetCDF_fn, 'r')
#if nutrient == 'Discharge':
#   valueAcc = f.variables['discharge'][:].astype(np.float32)
#else:
#    valueAcc = f.variables[nutrient][:].astype(np.float16)/1000
#print valueAcc.shape
value = f.variables[nutrient][[0][:][:]].astype(np.float32)  #/1000
maxRunAvr = np.concatenate(value)
f.close()
np.putmask(maxRunAvr, maxRunAvr > 0, 0)
#stdRas = np.copy(averageRas)
#rangeRas = np.copy(averageRas)

for r in range(0, 1500):
    #for r in range(0, 2053): #Oahu
    print(r)
Esempio n. 49
0
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import json
from scipy.io import netcdf

with netcdf.netcdf_file('MSR-2.nc', mmap=False) as netcdf_file:
    variables = netcdf_file.variables
print(*variables, sep=', ')
print(variables['time'].dimensions)
print(variables['Average_O3_column'].dimensions)
print(variables['time'].units, variables['Average_O3_column'].units)
Dushanbelong = 68.75
Dushanbelat = 38.57
a = np.searchsorted(variables['longitude'].data, Dushanbelong, sorter=None)  # find the grid node with the nearest coordinates
b = np.searchsorted(variables['latitude'].data, Dushanbelat, sorter=None)
plt.plot(variables['time'][:], variables['Average_O3_column'][:, b, a], label = 'All data', color='grey')  # plot the ozone column as a function of time
plt.plot(variables['time'][::12], variables['Average_O3_column'][::12, b, a], label = 'January', color='orange')  # January only
plt.plot(variables['time'][6::12], variables['Average_O3_column'][6::12, b, a], label = 'July', color='purple')
plt.xlabel("Months since 1970-01-15")
plt.ylabel('Dobson units')
plt.grid()
plt.legend()
plt.suptitle('Monthly average data 1979-2018')
plt.savefig('ozon.png')
plt.show()
minall=min(variables['Average_O3_column'][:, b, a])  # compute min/max/mean values for the whole record, for January, and for July
avgall=np.mean(variables['Average_O3_column'][:, b, a])
maxall=max(variables['Average_O3_column'][:, b, a])
minjan=min(variables['Average_O3_column'][::12, b, a])
avgjan=np.mean(variables['Average_O3_column'][::12, b, a])
Esempio n. 50
0
def readNCFromTar(tar, file, var):
    TF = tarfile.open(tar, 'r')
    member = [m for m in TF.getmembers() if file in m.name][0]
    nc = netcdf.netcdf_file(TF.extractfile(member), 'r')
    return nc.variables[var]
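# A hedged variant of readNCFromTar (a sketch, not the original function): copy the data out and
# close both handles, so the caller is not left holding references into an open tar member.
import tarfile
from scipy.io import netcdf

def read_nc_from_tar_copy(tar, file, var):
    with tarfile.open(tar, 'r') as TF:
        member = [m for m in TF.getmembers() if file in m.name][0]
        nc = netcdf.netcdf_file(TF.extractfile(member), 'r')  # file-like input => mmap is off
        data = nc.variables[var][()].copy()                   # detach the array from the file
        nc.close()
    return data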
Esempio n. 51
0
def read_fesom_mesh(path, alpha, beta, gamma, read_diag=True):
    '''
    .. note:: Deprecated in pyfesom 0.1
          `read_fesom_mesh` will be removed in the future; it is replaced by
          `load_mesh`.
    '''

    mesh=fesom_mesh()
    mesh.path=path
    mesh.alpha=alpha
    mesh.beta=beta
    mesh.gamma=gamma    

    nod2dfile=mesh.path+'nod2d.out'
    elm2dfile=mesh.path+'elem2d.out'
    aux3dfile=mesh.path+'aux3d.out'
    nod3dfile=mesh.path+'nod3d.out'


    file_content = pd.read_csv(nod2dfile, delim_whitespace=True, skiprows=1, \
                                          names=['node_number','x','y','flag'] )
    mesh.x2=file_content.x.values
    mesh.y2=file_content.y.values
    mesh.n2d=len(mesh.x2)
    
    file_content = pd.read_csv(elm2dfile, delim_whitespace=True, skiprows=1, \
                                          names=['first_elem','second_elem','third_elem'])
    mesh.elem=file_content.values-1
    mesh.e2d=np.shape(mesh.elem)[0]

    mesh.n3d=int(open(mesh.path+'nod3d.out').readline().rstrip())
    df = pd.read_csv(nod3dfile, delim_whitespace=True, skiprows=1, \
                                names=['node_number','x','y','z','flag'])
    zcoord = -df.z.values
    mesh.zlevs = np.unique(zcoord)

    with open(aux3dfile) as f:
        mesh.nlev=int(next(f))
        mesh.n32=np.array([next(f) for x in range(mesh.n2d*mesh.nlev)]).astype(int).reshape(mesh.n2d,mesh.nlev)   
    mesh.topo=np.zeros(shape=(mesh.n2d))
    for prof in mesh.n32:           
        ind_nan = prof[prof>0]
        ind_nan=ind_nan[-1]
        mesh.topo[prof[0]-1]=zcoord[ind_nan-1]
    ###########################################
    # here we compute the volumes of the triangles;
    # this should be moved into the general FESOM mesh output netcdf file
    #
    r_earth=6371000.0
    rad=np.pi/180
    edx=mesh.x2[mesh.elem]
    edy=mesh.y2[mesh.elem]
    ed=np.array([edx, edy])

    jacobian2D=ed[:, :, 1]-ed[:, :, 0]
    jacobian2D=np.array([jacobian2D, ed[:, :, 2]-ed[:, :, 0]])
    for j in range(2):
        mind = [i for (i, val) in enumerate(jacobian2D[j,0,:]) if val > 355]
        pind = [i for (i, val) in enumerate(jacobian2D[j,0,:]) if val < -355]
        jacobian2D[j,0,mind]=jacobian2D[j,0,mind]-360
        jacobian2D[j,0,pind]=jacobian2D[j,0,pind]+360

    jacobian2D=jacobian2D*r_earth*rad

    for k in range(2):
        jacobian2D[k,0,:]=jacobian2D[k,0,:]*np.cos(edy*rad).mean(axis=1)

    mesh.voltri = abs(np.linalg.det(np.rollaxis(jacobian2D, 2)))/2.
    
    # compute the 2D lump operator
    cnt=np.array((0,)*mesh.n2d)
    mesh.lump2=np.array((0.,)*mesh.n2d)
    for i in range(3):
        for j in range(mesh.e2d):
            n=mesh.elem[j,i]
            #cnt[n]=cnt[n]+1
            mesh.lump2[n]=mesh.lump2[n]+mesh.voltri[j]
    mesh.lump2=mesh.lump2/3.
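    # Hedged aside (not part of the original routine): the same lump operator can be accumulated
    # without the double Python loop using np.add.at, which handles repeated node indices correctly:
    #   np.add.at(mesh.lump2, mesh.elem.ravel(), np.repeat(mesh.voltri, 3))
    #   mesh.lump2 /= 3.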
    ###########################################
    #here we read the 3D cluster volumes
    if (read_diag):
        f = netcdf.netcdf_file(mesh.path+'mesh.diag.nc', 'r')
        mesh.cluster_vol3=f.variables['cluster_vol'].data
        mesh.cluster_vol2=f.variables['cluster_area'].data
        f.close()
    else:
        mesh.cluster_vol3=0
        mesh.cluster_vol2=0
    #we should rotate the mesh to the geographical coordinates
    (mesh.x2,mesh.y2)=ut.scalar_r2g(mesh.alpha,mesh.beta,mesh.gamma,mesh.x2,mesh.y2)
    d=mesh.x2[mesh.elem].max(axis=1)-mesh.x2[mesh.elem].min(axis=1)
    mesh.no_cyclic_elem = [i for (i, val) in enumerate(d) if val < 100]
    return mesh
Esempio n. 52
0
    def PyExec(self):

        # Get file path
        file_name = self.getPropertyValue("InputFile")

        # Load trajectory file
        trajectory = netcdf.netcdf_file(file_name, mode="r")

        logger.information("Loading particle id's and coordinate array...")
        start_time = time.time()

        # netcdf object containing the particle id numbers
        description = (trajectory.variables["description"])[:]
        # Convert description object to string via for loop. The original object has strange formatting
        particleID = ''
        for i in description:
            particleID += i.decode('UTF-8')
        # Extract particle id's from string using regular expressions
        p_atoms = re.findall(r"A\('[a-z]+\d+',\d+", particleID)

        # Many-to-one structures. Identify the set of atomic species present (list structure 'elements')
        # in the simulation and repackage atoms into a dictionary
        # 'atoms_to_species' with structure id number -> species
        atoms_to_species = {}
        species_to_atoms = {}
        elements = []

        # Populate the atoms_to_species dictionary and the elements list
        for j in p_atoms:
            key = re.findall(r"',\d+", j)[0]
            key = int(re.findall(r"\d+", key)[0])
            element = re.findall(r"[a-z]+", j)[0]
            if element not in elements:
                elements.append(str(element))
            atoms_to_species[key] = str(element)

        # Initialise lists in the species_to_atoms dictionary
        for j in elements:
            species_to_atoms[j] = []

        # Populate the species_to_atoms dictionary
        for j in p_atoms:
            key = re.findall(r"',\d+", j)[0]
            key = int(re.findall(r"\d+", key)[0])
            element = re.findall(r"[a-z]+", j)[0]
            species_to_atoms[element].append(key)

        # Coordinate array. Shape: timesteps x (# of particles) x (# of spatial dimensions)
        configuration = trajectory.variables["configuration"]

        # Extract useful simulation parameters
        # Number of species present in the simulation
        n_species = len(elements)
        # Number of particles present in the simulation
        n_particles = len(atoms_to_species)
        # Number of timesteps in the simulation
        n_timesteps = int(configuration.shape[0])
        # Number of spatial dimensions
        n_dimensions = int(configuration.shape[2])

        logger.information(str(time.time() - start_time) + " s")

        logger.information("Transforming coordinates...")
        start_time = time.time()

        # Box size for each timestep. Shape: timesteps x (3 consecutive 3-vectors)
        box_size = trajectory.variables["box_size"]

        # Reshape the parallelepipeds into 3x3 tensors for coordinate transformations.
        # Shape: timesteps x 3 vectors x (# of spatial dimensions)
        box_size_tensors = np.array(
            [box_size[j].reshape((3, 3)) for j in range(n_timesteps)])

        # Copy the configuration object into a numpy array
        configuration_copy = np.array(
            [configuration[i] for i in range(n_timesteps)])

        # Swap the time and particle axes
        configuration_copy = np.swapaxes(configuration_copy, 0,
                                         1)  #/1.12484770

        # Get scaled coordinates (assumes orthogonal simulation box)
        scaled_coords = np.zeros(np.shape(configuration_copy))
        for i in range(n_particles):
            for j in range(n_timesteps):
                for k in range(n_dimensions):
                    scaled_coords[i, j, k] = configuration_copy[
                        i, j, k] / box_size_tensors[j, k, k]
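        # Hedged aside (not part of the original algorithm): assuming the box really is orthogonal,
        # the same scaling could be done without the triple loop by broadcasting against the box
        # diagonal, e.g.
        #   box_diag = np.diagonal(box_size_tensors, axis1=1, axis2=2)       # (timesteps, 3)
        #   scaled_coords = configuration_copy / box_diag[np.newaxis, :, :]  # (particles, timesteps, 3)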

        # # Transform particle trajectories (configuration array) to Cartesian coordinates at each time step

        logger.information(str(time.time() - start_time) + " s")

        logger.information("Calculating velocities...")
        start_time = time.time()

        # Initialise velocity array. Note that the time dimension is 2 elements shorter than the coordinate array.
        # Use finite difference methods to evaluate the time-derivative to 1st order
        # Shape: (# of particles) x (timesteps-2) x (# of spatial dimensions)

        velocities = np.zeros((n_particles, n_timesteps - 1, n_dimensions))
        v1 = scaled_coords[:, 1:-1, :] - scaled_coords[:, :-2, :] - np.round(
            scaled_coords[:, 1:-1, :] - scaled_coords[:, :-2, :])
        v2 = scaled_coords[:, 2:, :] - scaled_coords[:, 1:-1, :] - np.round(
            scaled_coords[:, 2:, :] - scaled_coords[:, 1:-1, :])
        velocities[:, :-1, :] = (v1 + v2) / 2.

        # Transform velocities (configuration array) back to Cartesian coordinates at each time step
        velocities = np.array([[
            np.dot(box_size_tensors[j + 1], np.transpose(velocities[i, j]))
            for j in range(n_timesteps - 1)
        ] for i in range(n_particles)])
        logger.information(str(time.time() - start_time) + " s")

        logger.information(
            "Calculating velocity cross-correlations (resource intensive calculation)..."
        )
        start_time = time.time()
        correlation_length = n_timesteps - 1
        correlations = np.zeros((n_species, n_species, correlation_length))
        # Array for counting particle pairings
        correlation_count = np.zeros((n_species, n_species))

        # Compute cross-correlations for each pair of particles in each spatial coordinate
        for i in range(n_particles):
            for j in range(i + 1, n_particles):
                # Retrieve particle indices from the 'particles' dictionary and
                # determine the relevant position in the 'correlations' matrices
                k = elements.index(atoms_to_species[i])
                l = elements.index(atoms_to_species[j])
                # Check for the order of elements (ensures upper triangular matrix form & consistent order of operations)
                if k <= l:
                    correlation_temp = self.cross_correlation(
                        velocities[i], velocities[j])
                    correlations[k, l] += correlation_temp
                    correlation_count[k, l] += 1
                else:
                    correlation_temp = self.cross_correlation(
                        velocities[j], velocities[i])
                    correlations[l, k] += correlation_temp
                    correlation_count[l, k] += 1

        logger.information(str(time.time() - start_time) + " s")

        # Neutron coherent scattering lengths (femtometres)
        # Sources:
        # https://www.ncnr.nist.gov/resources/n-lengths/list.html
        # http://www.ati.ac.at/~neutropt/scattering/RecommendedScatteringLengthsOfElements.PDF
        Coh_b = {
            'h': -3.7409,
            'he': 3.26,
            'li': -1.90,
            'be': 7.79,
            'b': 5.30,
            'c': 6.6484,
            'n': 9.36,
            'o': 5.805,
            'f': 5.654,
            'ne': 4.60,
            'na': 3.63,
            'mg': 5.375,
            'al': 3.449,
            'si': 4.15071,
            'p': 5.13,
            's': 2.847,
            'cl': 9.5792,
            'ar': 1.909,
            'k': 3.67,
            'ca': 4.70,
            'sc': 12.29,
            'ti': -3.37,
            'v': -0.443,
            'cr': 3.635,
            'mn': -3.73,
            'fe': 9.45,
            'co': 2.49,
            'ni': 10.3,
            'cu': 7.718,
            'zn': 5.680,
            'ga': 7.288,
            'ge': 8.185,
            'as': 6.58,
            'se': 7.970,
            'br': 6.79,
            'kr': 7.81,
            'rb': 7.08,
            'sr': 7.02,
            'y': 7.75,
            'zr': 7.16,
            'nb': 7.054,
            'mo': 6.715,
            'tc': 6.8,
            'ru': 7.02,
            'rh': 5.90,
            'pd': 5.91,
            'ag': 5.922,
            'cd': 4.83,
            'in': 4.065,
            'sn': 6.225,
            'sb': 5.57,
            'te': 5.68,
            'i': 5.28,
            'xe': 4.69,
            'cs': 5.42,
            'ba': 5.07,
            'la': 8.24,
            'ce': 4.84,
            'pr': 4.58,
            'nd': 7.69,
            'pm': 12.6,
            'sm': 0.00,
            'eu': 5.3,
            'gd': 9.5,
            'tb': 7.34,
            'dy': 16.9,
            'ho': 8.44,
            'er': 7.79,
            'tm': 7.07,
            'yb': 12.41,
            'lu': 7.21,
            'hf': 7.77,
            'ta': 6.91,
            'w': 4.755,
            're': 9.2,
            'os': 10.7,
            'ir': 10.6,
            'pt': 9.60,
            'au': 7.63,
            'hg': 12.66,
            'tl': 8.776,
            'pb': 9.401,
            'bi': 8.532,
            'po': None,
            'at': None,
            'rn': None,
            'fr': None,
            'ra': 10.0,
            'ac': None,
            'th': 10.31,
            'pa': 9.1,
            'u': 8.417,
            'np': 10.55,
            'pu': None,
            'am': 8.3,
            'cm': 9.5
        }

        logger.information(
            "Averaging correlation Fourier transforms & scaling with the coherent neutron scattering lenghts..."
        )
        start_time = time.time()

        # Scaling cross-correlations with the scattering lengths
        for i in range(n_species):
            for j in range(i, n_species):
                correlations[i, j] = correlations[i, j] * Coh_b[
                    elements[i]] * Coh_b[elements[j]] / correlation_count[i, j]

        logger.information(str(time.time() - start_time) + " s")

        # Generate a list of row names according to the atomic species present in the simulation
        row_names = []
        for i in range(n_species):
            for j in range(i, n_species):
                row_names.append(elements[i].capitalize() + ' and ' +
                                 elements[j].capitalize())

        # Initialise & populate the output_ws workspace
        nrows = int((n_species * n_species - n_species) / 2 + n_species)
        #nbins=(np.shape(correlations)[2])
        yvals = np.empty(0)
        for i in range(n_species):
            for j in range(i, n_species):
                # Add folded correlations to the array passed to the workspace
                yvals = np.append(yvals,
                                  self.fold_correlation(correlations[i, j]))

        # Timesteps between coordinate positions
        step = float(self.getPropertyValue("Timestep"))

        xvals = np.arange(0, np.ceil((n_timesteps - 1) / 2.0)) * step / 1000.0

        evals = np.zeros(np.shape(yvals))

        output_name = self.getPropertyValue("OutputWorkspace")
        output_ws = CreateWorkspace(OutputWorkspace=output_name,
                                    DataX=xvals,
                                    DataY=yvals,
                                    DataE=evals,
                                    NSpec=nrows,
                                    VerticalAxisUnit="Text",
                                    VerticalAxisValues=row_names,
                                    UnitX="ps")

        # Set output workspace to output_ws
        self.setProperty('OutputWorkspace', output_ws)
Esempio n. 53
0
def test_read_write_files():
    # test round trip for example file
    cwd = os.getcwd()
    try:
        tmpdir = tempfile.mkdtemp()
        os.chdir(tmpdir)
        with make_simple('simple.nc', 'w') as f:
            pass
        # read the file we just created in 'a' mode
        with netcdf_file('simple.nc', 'a') as f:
            check_simple(f)
            # add something
            f._attributes['appendRan'] = 1

        # To read the NetCDF file we just created::
        with netcdf_file('simple.nc') as f:
            # Using mmap is the default
            assert_(f.use_mmap)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)

        # Read it in append (and check mmap is off)
        with netcdf_file('simple.nc', 'a') as f:
            assert_(not f.use_mmap)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)

        # Now without mmap
        with netcdf_file('simple.nc', mmap=False) as f:
            # mmap is explicitly disabled here
            assert_(not f.use_mmap)
            check_simple(f)

        # To read the NetCDF file we just created, as file object, no
        # mmap.  When n * n_bytes(var_type) is not divisible by 4, this
        # raised an error in pupynere 1.0.12 and scipy rev 5893, because
        # calculated vsize was rounding up in units of 4 - see
        # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
        with open('simple.nc', 'rb') as fobj:
            with netcdf_file(fobj) as f:
                # by default, don't use mmap for file-like
                assert_(not f.use_mmap)
                check_simple(f)

        # Read file from fileobj, with mmap
        with open('simple.nc', 'rb') as fobj:
            with netcdf_file(fobj, mmap=True) as f:
                assert_(f.use_mmap)
                check_simple(f)

        # Again read it in append mode (adding another att)
        with open('simple.nc', 'r+b') as fobj:
            with netcdf_file(fobj, 'a') as f:
                assert_(not f.use_mmap)
                check_simple(f)
                f.createDimension('app_dim', 1)
                var = f.createVariable('app_var', 'i', ('app_dim',))
                var[:] = 42

        # And... check that app_var made it in...
        with netcdf_file('simple.nc') as f:
            check_simple(f)
            assert_equal(f.variables['app_var'][:], 42)

    except:
        os.chdir(cwd)
        shutil.rmtree(tmpdir)
        raise
    os.chdir(cwd)
    shutil.rmtree(tmpdir)
Esempio n. 54
0
def test_read_with2dVar():
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        vardata = f.variables['var7_2d'][:]
        assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
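# A minimal round-trip sketch of what maskandscale does (assumes scipy.io.netcdf_file;
# 'maskdemo.nc' is a throwaway file and the values are made up): entries equal to the
# variable's missing_value come back masked on read.
import numpy as np
from scipy.io import netcdf_file

with netcdf_file('maskdemo.nc', 'w') as f:
    f.createDimension('x', 3)
    v = f.createVariable('var', 'f4', ('x',))
    v.missing_value = -999.0
    v[:] = [1.0, -999.0, 3.0]

with netcdf_file('maskdemo.nc', maskandscale=True) as f:
    data = f.variables['var'][:]       # numpy masked array
    print(np.ma.is_masked(data[1]))    # expected: True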
Esempio n. 55
0
def pyseidon_to_netcdf(fvcom, filename, debug):
    """
    saves fvcom object in a netCDF file

    inputs:
      - fvcom = fvcom pyseidon object
      - filename = file name, string
    """
    #Define bounding box
    if debug: print "Computing bounding box..."
    if fvcom.Grid._ax == []:
        lon = fvcom.Grid.lon[:]
        lat = fvcom.Grid.lat[:]
        fvcom.Grid._ax = [lon.min(), lon.max(), lat.min(), lat.max()]
    filename = filename + ".nc"
    f = netcdf.netcdf_file(filename, 'w')
    # history attribute
    f.history = fvcom.History[:]

    #create dimensions
    if not fvcom.Variables._3D:
        ##2D dimensions
        dims = {
            'three': 3,
            'four': 4,
            'nele': fvcom.Grid.nele,
            'node': fvcom.Grid.nnode,
            'siglay': 2,
            'siglev': 3,
            'time': fvcom.Variables.julianTime.shape[0]
        }
    else:
        ##3D dimensions
        dims = {
            'three': 3,
            'four': 4,
            'nele': fvcom.Grid.nele,
            'node': fvcom.Grid.nnode,
            'siglay': fvcom.Grid.nlevel,
            'siglev': fvcom.Grid.nlevel + 1,
            'vertshear': fvcom.Grid.nlevel - 1,
            'time': fvcom.Variables.julianTime.shape[0]
        }
    for key in dims.keys():
        f.createDimension(key, dims[key])

    #list of potential 2D var
    varname = [
        'el', 'ua', 'va', 'julianTime', 'matlabTime', 'depth_av_flow_dir',
        'hori_velo_norm', 'depth_av_vorticity', 'depth_av_power_density',
        'depth_av_power_assessment'
    ]
    #list of potential 2D grid var
    gridname = [
        'a1u', 'a2u', 'trinodes', 'triele', 'xc', 'x', 'yc', 'y', 'lonc',
        'lon', 'latc', 'lat', 'aw0', 'awy', 'awx', 'h', 'hc', 'depth2D'
    ]
    #list of potential 3D var * grid var
    if fvcom.Variables._3D:
        varname = varname + [
            'u', 'v', 'flow_dir', 'velo_norm', 'verti_shear', 'vorticity',
            'power_density'
        ]
        gridname = gridname + ['siglay', 'siglev', 'depth']

    #load in netcdf file
    if debug: print "Loading variables' matrices in nc file..."
    for var in varname:
        if var in [
                'ua', 'va', 'depth_av_flow_dir', 'depth_av_vorticity',
                'depth_av_power_density', 'depth_av_power_assessment',
                'hori_velo_norm'
        ]:
            try:
                tmp_var = f.createVariable(var, 'float', ('time', 'nele'))
                f.variables[var][:] = getattr(fvcom.Variables, var)[:]
            except AttributeError:
                pass
        if var in ['julianTime', 'matlabTime']:
            try:
                tmp_var = f.createVariable(var, 'float', ('time', ))
                f.variables[var][:] = getattr(fvcom.Variables, var)[:]
            except AttributeError:
                pass
        if var == 'el':
            try:
                tmp_var = f.createVariable(var, 'float', ('time', 'node'))
                f.variables[var][:] = getattr(fvcom.Variables, var)[:]
            except AttributeError:
                pass
        if fvcom.Variables._3D:
            if var in [
                    'u', 'v', 'flow_dir', 'velo_norm', 'vorticity',
                    'power_density'
            ]:
                try:
                    tmp_var = f.createVariable(var, 'float',
                                               ('time', 'siglay', 'nele'))
                    f.variables[var][:] = getattr(fvcom.Variables, var)[:]
                except AttributeError:
                    pass
            if var in ['verti_shear']:
                try:
                    tmp_var = f.createVariable(var, 'float',
                                               ('time', 'vertshear', 'nele'))
                    f.variables[var][:] = getattr(fvcom.Variables, var)[:]
                except AttributeError:
                    pass

    if debug: print "Loading grid' matrices in nc file..."
    for grd in gridname:
        if grd in ['xc', 'yc', 'lonc', 'latc', 'hc']:
            try:
                tmp_var = f.createVariable(grd, 'float', ('nele', ))
                tmp_var[:] = getattr(fvcom.Grid, grd)[:]
                #f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
            except AttributeError:
                pass
        if grd == 'depth2D':
            try:
                depth2D = f.createVariable(grd, 'float', ('time', 'nele'))
                f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
            except AttributeError:
                pass
        if grd in ['x', 'y', 'lon', 'lat', 'h']:
            try:
                tmp_var = f.createVariable(grd, 'float', ('node', ))
                f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
            except AttributeError:
                pass
        if grd in ['triele', 'trinodes']:
            try:
                tmp_var = f.createVariable(grd, 'i', ('nele', 'three'))
                f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
            except AttributeError:
                pass
        if grd in ['a1u', 'a2u']:
            try:
                tmp_var = f.createVariable(grd, 'i', ('four', 'nele'))
                f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
            except AttributeError:
                pass
        if grd in ['aw0', 'awy', 'awx']:
            try:
                tmp_var = f.createVariable(grd, 'i', ('three', 'nele'))
                f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
            except AttributeError:
                pass
        if fvcom.Variables._3D:
            if grd == 'siglay':
                try:
                    siglay = f.createVariable(grd, 'float', ('siglay', 'node'))
                    f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
                except AttributeError:
                    pass
            if grd == 'siglev':
                try:
                    siglev = f.createVariable(grd, 'float', ('siglev', 'node'))
                    f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
                except AttributeError:
                    pass
            if grd == 'depth':
                try:
                    depth = f.createVariable(grd, 'float',
                                             ('time', 'siglay', 'nele'))
                    f.variables[grd][:] = getattr(fvcom.Grid, grd)[:]
                except AttributeError:
                    pass
    f.close()
    if debug: print "...done"
Esempio n. 56
0
def read_amber_traj(parm7_file, nc_file, mdfrc_file, mden_file):
    """The amber trajectory includes:
    * nc, NetCDF format, stores coordinates
    * mdfrc, NetCDF format, stores forces
    * mden, text format, stores energies
    * parm7, text format, stores types
    """

    flag = False
    amber_types = []
    with open(parm7_file) as f:
        for line in f:
            if line.startswith("%FLAG"):
                flag = line.startswith("%FLAG AMBER_ATOM_TYPE")
            elif flag:
                if line.startswith("%FORMAT"):
                    fmt = re.findall(r'\d+', line)
                    fmt0 = int(fmt[0])
                    fmt1 = int(fmt[1])
                else:
                    for ii in range(fmt0):
                        start_index = ii * fmt1
                        end_index = (ii + 1) * fmt1
                        if end_index >= len(line):
                            continue
                        amber_types.append(line[start_index:end_index].strip())

    with netcdf.netcdf_file(nc_file, 'r') as f:
        coords = np.array(f.variables["coordinates"][:])
        cell_lengths = np.array(f.variables["cell_lengths"][:])
        cell_angles = np.array(f.variables["cell_angles"][:])
        if np.all(cell_angles > 89.99) and np.all(cell_angles < 90.01):
            # only support 90
            # TODO: support other angles
            shape = cell_lengths.shape
            cells = np.zeros((shape[0], 3, 3))
            for ii in range(3):
                cells[:, ii, ii] = cell_lengths[:, ii]
        else:
            raise RuntimeError("Unsupported cells")

    with netcdf.netcdf_file(mdfrc_file, 'r') as f:
        forces = np.array(f.variables["forces"][:])

    # energy
    energies = []
    with open(mden_file) as f:
        for line in f:
            if line.startswith("L6"):
                s = line.split()
                if s[2] != "E_pot":
                    energies.append(float(s[2]))

    atom_names, atom_types, atom_numbs = np.unique(amber_types,
                                                   return_inverse=True,
                                                   return_counts=True)

    data = {}
    data['atom_names'] = list(atom_names)
    data['atom_numbs'] = list(atom_numbs)
    data['atom_types'] = atom_types
    data['forces'] = forces * force_convert
    data['energies'] = np.array(energies) * energy_convert
    data['coords'] = coords
    data['cells'] = cells
    data['orig'] = np.array([0, 0, 0])
    return data
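# Hypothetical usage of read_amber_traj defined above. The four file names are placeholders, and
# force_convert / energy_convert are assumed to be module-level unit-conversion constants defined
# elsewhere in the original module.
data = read_amber_traj('system.parm7', 'traj.nc', 'traj.mdfrc', 'traj.mden')
print(data['atom_names'], data['coords'].shape, data['energies'].shape)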
Esempio n. 57
0
            thePrec = np.transpose(thePrec)
            # print(thePrec)
            # exit()
            #print (thePrec.shape)
            #exit()
            lats = extract_h4_by_name(the_filename, 'Grid/lat')
            lons = extract_h4_by_name(the_filename, 'Grid/lon')
            isif = thePrec.shape
            allday_prec.append(thePrec)
        allday_prec = np.array(allday_prec)

        daily_prec = np.nanmean(allday_prec, axis=0)

        outfile = output_dir + 'daily_precipitation_' + idate + '.nc4'
        # create nc file
        fid = netcdf.netcdf_file(outfile, 'w')
        # create dimension variable, so we can use it in the netcdf
        fid.createDimension('longitude', isif[1])
        fid.createDimension('latitude', isif[0])

        nc_var = fid.createVariable('nlat', 'f4', ('latitude', ))
        nc_var[:] = lats
        nc_var.long_name = 'latitude'
        nc_var.standard_name = 'latitude'
        nc_var.units = 'degrees_north'

        nc_var = fid.createVariable('nlon', 'f4', ('longitude', ))
        nc_var[:] = lons
        nc_var.long_name = 'longitude'
        nc_var.standard_name = 'longitude'
        nc_var.units = 'degrees_east'
Esempio n. 58
0
    yrstr.append(yrC)
ind = range(nsim)
nfile = np.size(yrstr)
"""
check before this line
"""

#%%
#npert = 1
land_mask = 0
for vi in range(nvar):
    for i in ind:
        atmdir = indir + exper + sim[i] + '/' + plat + 'pp/' + diag + '/'
        stafile = atmdir + diag + '.static.nc'
        fs = []
        fs.append(nc.netcdf_file(stafile, 'r', mmap=True))
        bk = fs[-1].variables['bk'][:].astype(np.float64)
        pk = fs[-1].variables['pk'][:].astype(np.float64)
        lat = fs[-1].variables['lat'][:].astype(np.float64)
        lon = fs[-1].variables['lon'][:].astype(np.float64)
        phalf = fs[-1].variables['phalf'][:].astype(np.float64)
        zsurf = fs[-1].variables['zsurf'][:].astype(np.float64)
        if ('land_mask' in fs[-1].variables):
            land_mask = fs[-1].variables['land_mask'][:].astype(np.float64)
        fs[-1].close()
        nlat = np.size(lat)
        nlon = np.size(lon)
        nphalf = np.size(phalf)
        #%%
        filedir = atmdir + indir_sub
        files = []
Esempio n. 59
0
    if os.path.isfile(interppath):
        '''
        print('Interpolated data found at: %s' %interppath)
        interpdict = np.load(interppath).item()   # load here the above pickle
        ncdict = wrf.extract_vars(wrfdata, None, ('GRNHFX'))
        '''

        print('Interpolated data found at: %s' % interppath)
        interpfile = open(interppath, 'rb')
        interpdict = pickle.load(interpfile)  # load here the above pickle

    else:
        print(
            'WARNING: no interpolated data found - generating: SLOW ROUTINE!')
        wrfpath = plume.wrfdir + 'wrfout_' + Case
        wrfdata = netcdf.netcdf_file(wrfpath, mode='r')
        ncdict = wrf.extract_vars(wrfdata,
                                  None, ('GRNHFX', 'W', 'QVAPOR', 'T', 'PHB',
                                         'PH', 'U', 'P', 'PB', 'V', 'tr17_1'),
                                  meta=False)
        ncdict['PM25'] = ncdict.pop('tr17_1')

        #get height and destagger vars
        zstag = (ncdict['PHB'] + ncdict['PH']) / 9.81  # geopotential divided by g gives height [m]
        z = wrf.destagger(zstag, 1)
        u = wrf.destagger(ncdict['U'], 3)
        w = wrf.destagger(ncdict['W'], 1)
        v = wrf.destagger(ncdict['V'], 2)

        #list variables to interpolate
        nT, nZ, nY, nX = np.shape(z)
Esempio n. 60
0
def test_read_example_data():
    # read any example data files
    for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
        f = netcdf_file(fname, 'r')
        f.close()
        f = netcdf_file(fname, 'r', mmap=False)