Example #1
 def loadFile(self):
     if self.extension in ('x', 'x.gz'):
         self.traj = bi.AmberCrdParser(self.fname, self.pdb, 1)
         self.traj.crd.readline()  #skip first line
         self.nextFunction = self.traj.nextFrame
     elif self.extension in ('nc', 'netcdf'):
         try:
             import scipy.io.netcdf as ncdf
             self.traj = ncdf.NetCDFFile(self.fname,
                                         'r').variables['coordinates']
             self.nextFunction = self.returnFrameFromNetcdf
         except ImportError:
             raise TrajFileError("Can't read NetCDF trajectory")
     elif self.extension in ('dcd', ):
         from NamdDCDParser import NamdDCDParser
         self.traj = NamdDCDParser(self.fname, self.pdb, box=1)
         self.nextFunction = self.traj.read_dcdstep
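The returnFrameFromNetcdf callback used in the NetCDF branch is not part of this excerpt. A minimal sketch of what it might look like, assuming the Amber NetCDF convention of a 'coordinates' variable shaped (frame, atom, 3) and a frame counter kept on the instance (both are assumptions, not the project's actual code):

    def returnFrameFromNetcdf(self):
        # Hypothetical helper: return the next frame as an (n_atoms, 3) array.
        # Assumes self._frame was initialised to 0 when the file was opened.
        frame = self.traj[self._frame, :, :].copy()
        self._frame += 1
        return frame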
Example #2
def getvar(fname, varname, npf, index, scale_factor):
    usescipy = False
    try:
        import Scientific.IO.NetCDF as netcdf
    except ImportError:
        import scipy
        from scipy.io import netcdf
        usescipy = True
    if (usescipy):
        nffile = netcdf.netcdf_file(fname,"r",mmap=False)
        var = nffile.variables[varname]
        varvals = var[0:npf,index].copy() * scale_factor    #works for vector only?
        nffile.close()
    else:
        nffile = netcdf.NetCDFFile(fname,"r")
        var = nffile.variables[varname]
        varvals = var.getValue()[0:npf,index] * scale_factor
        nffile.close()
    return varvals
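A hypothetical call illustrating the argument order of getvar (the file name, variable name, and sizes below are invented for the example, not taken from the source project):

# Read rows 0..16 of column 0 of 'TLAI', leaving the values unscaled.
tlai = getvar('ELM_output.nc', 'TLAI', 17, 0, 1.0)
print(tlai.shape)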
Example #3
def read_nc_head(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    vers = [
        ''.join([
            ii.decode('UTF-8')
            for ii in list(rootgrp.variables['vgosDB_Version'])
        ])
    ]
    stations = [
        ''.join([ii.decode('UTF-8') for ii in k])
        for k in list(rootgrp.variables['StationList'])
    ]
    stations = [station.strip() for station in stations]
    sour = [
        ''.join([ii.decode('UTF-8') for ii in k])
        for k in list(rootgrp.variables['SourceList'])
    ]
    rootgrp.close()
    return vers, stations, sour
def write_netcdf(variables, dimensions, attributes, filename):
    f = S.NetCDFFile(filename, mode='w')
    for d in dimensions:
        f.createDimension(d['name'], d['length'])
    var = [None] * len(variables)
    for i, v in enumerate(variables):
        var[i] = f.createVariable(v['name'], v['format'], v['dimension_tuple'])
        var[i].units = v['units']
        var[i].description = v['description']
        if 'missing_value' in v:
            var[i].missing_value = v['missing_value']
            if np.ma.is_masked(v['data']):
                v['data'] = v['data'].filled(fill_value=v['missing_value'])
        var[i][:] = v['data']
    for a in attributes:
        f.description = a['description']
        f.history = a['history']
        if 'TimeZone' in a:
            f.TimeZone = a['TimeZone']
    f.close()
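write_netcdf expects its inputs as lists of dicts; a small hypothetical call showing the expected keys (the dimension, variable, and attribute values below are invented for illustration):

import numpy as np

dims = [{'name': 'time', 'length': 3}]
variables = [{
    'name': 'precip',
    'format': 'f',
    'dimension_tuple': ('time',),
    'units': 'mm/day',
    'description': 'daily precipitation',
    'data': np.array([0.0, 2.5, 1.1], dtype='f4'),
}]
attributes = [{'description': 'example file', 'history': 'created by write_netcdf'}]
write_netcdf(variables, dims, attributes, 'example.nc')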
Example #5
    def get_projection(self):
        """ Returns GeographicSystem and MapProjection instances from
            lmatools.coordinateSystems corresponding
            to the coordinate system specified by the metadata of the
            first NetCDF file in self._filenames.
        """
        from lmatools.coordinateSystems import GeographicSystem, MapProjection
        geosys = GeographicSystem()

        f = nc.NetCDFFile(self._filenames[0])

        # Surely someone has written an automated library to parse coordinate
        # reference data from CF-compliant files.
        if 'Lambert_Azimuthal_Equal_Area' in list(f.variables.keys()):
            nc_proj = f.variables['Lambert_Azimuthal_Equal_Area']
            proj_name = 'laea'
            ctrlon, ctrlat = (
                nc_proj.longitude_of_projection_origin,
                nc_proj.latitude_of_projection_origin,
            )
            try:
                ctralt = nc_proj.altitude_of_projection_origin
            except AttributeError:
                print(
                    "No altitude attribute in NetCDF projection data, setting to 0.0"
                )
                ctralt = 0.0
            mapproj = MapProjection(proj_name,
                                    ctrLat=ctrlat,
                                    ctrLon=ctrlon,
                                    lat_0=ctrlat,
                                    lon_0=ctrlon)
            # print geosys.fromECEF(*mapproj.toECEF((0,0), (0,0), (0,0)))
            return geosys, mapproj
        else:
            print("projection not found")
            return geosys, None
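A hypothetical use of the returned pair, mirroring the commented-out sanity check inside get_projection (the owning object below is made up; the call converts a point at the projection origin back to geographic coordinates via ECEF):

grid = GridReader(filenames)          # hypothetical owner of get_projection
geosys, mapproj = grid.get_projection()
if mapproj is not None:
    print(geosys.fromECEF(*mapproj.toECEF((0, 0), (0, 0), (0, 0))))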
Example #6
File: Era_5.py  Project: Cgadal/Wind_data
 def Load_netcdf(self, name_files, save_to_npy=False):
     self.Uwind = []
     self.Vwind = []
     self.time = []
     self.file_names = name_files
     for i, file in enumerate(name_files):
         file_temp = netcdf.NetCDFFile(file, 'r', maskandscale=True)
         self.Uwind.append(
             np.moveaxis(np.copy(file_temp.variables['u10'][:]), 0, -1))
         self.Vwind.append(
             np.moveaxis(np.copy(file_temp.variables['v10'][:]), 0, -1))
         self.time.append(np.copy(file_temp.variables['time'][:]))
         if i == 0:
             self.latitude = np.copy(file_temp.variables['latitude'][:])
             self.longitude = np.copy(file_temp.variables['longitude'][:])
         file_temp.close()
     #
     self.Uwind, self.Vwind, self.time = np.concatenate(
         self.Uwind,
         axis=-1), np.concatenate(self.Vwind,
                                  axis=-1), np.concatenate(self.time,
                                                           axis=-1)
     self.time = Convert_time(self.time.astype(np.float64))
     self.Save_basic()
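Convert_time and Save_basic are not shown in this excerpt. ERA5 NetCDF files typically encode time as hours since 1900-01-01 00:00:00, so one plausible sketch of Convert_time (an assumption, not the project's actual implementation) is:

import numpy as np
from datetime import datetime, timedelta

def Convert_time(hours_since_1900):
    # Hypothetical helper: convert ERA5 'hours since 1900-01-01' to datetimes.
    epoch = datetime(1900, 1, 1)
    return np.array([epoch + timedelta(hours=float(h)) for h in hours_since_1900])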
Example #7
def read_qcode(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    qcode = rootgrp.variables['QualityCode'].data
    rootgrp.close()
    return qcode
Example #8
def read_nc_A_ant(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    axis_type = rootgrp.variables['AntennaAxisType'].data
    axis_offset = rootgrp.variables['AntennaAxisOffset'].data
    rootgrp.close()
    return axis_type, axis_offset
Example #9
def read_nc_O_RF(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    RefFreq = rootgrp.variables['RefFreq'].data
    rootgrp.close()
    return RefFreq
Example #10
def read_nc_CR_sour(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    obs2sour = rootgrp.variables['Scan2Source'].data
    rootgrp.close()
    return obs2sour
Example #11
def read_nc_O_time(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    YMDHM = rootgrp.variables['YMDHM'].data
    second = rootgrp.variables['Second'].data
    rootgrp.close()
    return YMDHM, second
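The read_nc_* helpers in these vgosDB examples all repeat the same open/read/close pattern; a generic sketch that covers them could look like this (the helper name is mine, not from the source):

from scipy.io import netcdf

def read_nc_vars(file, *names):
    # Open the file, copy out the .data of each requested variable, then close.
    rootgrp = netcdf.NetCDFFile(file, "r")
    values = tuple(rootgrp.variables[name].data.copy() for name in names)
    rootgrp.close()
    return values[0] if len(values) == 1 else values

# e.g. YMDHM, second = read_nc_vars(file, 'YMDHM', 'Second')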
Example #12
from scipy.io import netcdf
import numpy as np
import pandas as pd

wsi_1_file = netcdf.NetCDFFile("./fuer_yorick/data/wsi_1-5_1_remap.nc", "r", mmap=True)
print(wsi_1_file.variables)
# OrderedDict([
# ('time', <scipy.io.netcdf.netcdf_variable object at 0x7f0f7810c9e8>),
# ('lon', <scipy.io.netcdf.netcdf_variable object at 0x7f0f7810c978>),
# ('lat', <scipy.io.netcdf.netcdf_variable object at 0x7f0f7810cac8>),
# ('lev', <scipy.io.netcdf.netcdf_variable object at 0x7f0f7810cb70>),
# ('hyai', <scipy.io.netcdf.netcdf_variable object at 0x7f0f7810cc88>),
# ('hybi', <scipy.io.netcdf.netcdf_variable object at 0x7f0f7810cd30>),
# ('hyam', <scipy.io.netcdf.netcdf_variable object at 0x7f0f7810cdd8>),
# ('hybm', <scipy.io.netcdf.netcdf_variable object at 0x7f0f7810cef0>),
# ('var1', <scipy.io.netcdf.netcdf_variable object at 0x7f0f78114048>),
# ('var2', <scipy.io.netcdf.netcdf_variable object at 0x7f0f781140f0>),
# ('var3', <scipy.io.netcdf.netcdf_variable object at 0x7f0f78114198>),
# ('var4', <scipy.io.netcdf.netcdf_variable object at 0x7f0f78114240>),
# ('var5', <scipy.io.netcdf.netcdf_variable object at 0x7f0f781142e8>)
# ])

time = wsi_1_file.variables['time']
print(time.units)
print(time.shape)
print(time[0])
print(time[-1])

lat = wsi_1_file.variables['lat']
print(lat.units)
print(lat.shape)
def load_sat_chl(time_idx, stride=50):
    '''
    Load the chlorophyll field for one time step of chl_data.nc, convert the
    grid points to UTM, fit a GP model to the subsampled data, and cache it.
    :param time_idx: index into the time dimension of chl_data.nc
    :param stride: subsampling stride applied to the UTM points
    :return: (model, workspace)
    '''
    ds = netcdf.NetCDFFile("chl_data.nc")
    vars = ds.variables

    chl = vars["chlorophyll"].data
    times = vars["time"].data
    lats = vars["latitude"].data
    longs = vars["longitude"].data
    # On the chl data the order is time, altitude, lat, long
    assert 0 <= time_idx < times.shape[0]
    des_time = times[time_idx]

    r = uncache(des_time)
    if r is not None:
        return r

    utm_zones_and_data = dict()

    for i, lat in enumerate(lats):
        for j, long in enumerate(longs):
            cur_chl = chl[time_idx, 0, i, j]
            northing, easting, zone_number, zone_letter = utm.from_latlon(
                lat, -1 * (360 - long))
            # if (prev_zone_letter is not None and prev_zone_letter != zone_letter) or (prev_zone_number is not None and prev_zone_number != zone_number):
            #     continue
            utm_key = (zone_number, zone_letter)
            if utm_key not in utm_zones_and_data:
                utm_zones_and_data[utm_key] = ([], [])  # xs and ys
            if cur_chl > 0.0:
                utm_zones_and_data[utm_key][0].append(
                    [northing / 5000, easting / 5000])
                utm_zones_and_data[utm_key][1].append(cur_chl)

    utm_zones_and_data = {
        k: (np.array(v[0]), np.array(v[1]))
        for k, v in utm_zones_and_data.items()
    }
    best_key = None
    most_good_values = float("-inf")
    for key, value in utm_zones_and_data.items():
        if value[1].shape[0] > most_good_values:
            best_key = key
            most_good_values = value[1].shape[0]

    data = utm_zones_and_data[best_key]
    plt.ioff()
    plt.figure()
    X = data[0]
    Y = data[1]
    X = X[::stride, :]
    Y = Y[::stride]
    Y = (Y - np.mean(Y)) / np.std(Y)
    xmin = np.min(X[:, 0])
    ymin = np.min(X[:, 1])
    X -= np.array([xmin, ymin])

    #keep_indexes = []
    # for i in range(X.shape[0]):
    #     if xmin + corners[0] <= X[i, 0] <= min(xmin + corners[1], xmax) and ymin + corners[0] <= X[i, 1] <= min(
    #         ymin + corners[1], ymax):
    #         keep_indexes.append(i)
    # keep_indexes = np.array(keep_indexes)
    # X = X[keep_indexes,:]
    # Y = Y[keep_indexes]

    print(X.shape)
    # plt.scatter(data[0][:,0],data[0][:,1],c=data[1])
    # plt.colorbar()
    # plt.show()
    model = TorchExactGPBackedDataModel(X, Y, "default")
    model.fit(1 * 10**3)
    workspace = RectangularPlaneWorkspace(np.min(X[:, 0]), np.max(X[:, 0]),
                                          np.min(X[:, 1]), np.max(X[:, 1]))
    fname = cache_name(des_time)
    with open(fname + "w.pkl", "wb") as f:
        pickle.dump(workspace, f)
        pickle.dump(X, f)
        pickle.dump(Y, f)
    model.save(fname)
    ds.close()
    return model, workspace
        print "\nWork on hour:", h
        mem_percent.append(psutil.virtual_memory().percent)
        mem_used.append(psutil.virtual_memory().free)
        timerH = datetime.now()
        # Iniitialize the arrays with the first date (if you don't copy the
        # variable then the values are shared between max and min
        # for some pythonic reason)
        firstDATE = DATES[0]
        H = get_HRRR(datetime(firstDATE.year, firstDATE.month, firstDATE.day, h))
        maxH = H['value'].copy()
        minH = H['value'].copy()
        sumH = H['value'].copy(); count = 1

        # Create the NetCDF file if it hasn't been created yet
        if created_NC is False:
            f = netcdf.NetCDFFile('MP_MaxMinMean_hourly_'+var_name+'.nc', 'w')
            f.createDimension('x', np.shape(H['value'])[0])
            f.createDimension('y', np.shape(H['value'])[1])
            f.createDimension('t', 24)  # Hours
            f.createDimension('d', 1)   # Date
            f.createDimension('p', 6)   # Percentile categories
            nc_maxH = f.createVariable('max_'+variable, float, ('x', 'y', 't'))
            nc_minH = f.createVariable('min_'+variable, float, ('x', 'y', 't'))
            nc_meanH = f.createVariable('mean_'+variable, float, ('x', 'y', 't'))
            nc_count = f.createVariable('count', 'i', ('t'))
            nc_perC = f.createVariable('percent_compute', 'i', ('p'))
            nc_perH = f.createVariable('percentile', float, ('p', 'x', 'y', 't'))
            created_NC = True

        # Process DATES in chunks with multiprocessing. Each chunk of DATES will
        # be processed on the number of processors we are allowed.
Example #15
    else: days = 365

    curr_path = os.path.join(emis_path, KNU_dir, str(yr))
    list_date = list(
        range(matlab.datenum(f'{yr}0101'),
              matlab.datenum(f'{yr}1231') + 1))
    list_date = [str(matlab.datestr(d))
                 for d in list_date]  # '2016-01-01' format
    list_date = [x[:4] + x[5:7] + x[8:]
                 for x in list_date]  #  '20160101' format

    for i, date in enumerate(list_date):
        doy = i + 1
        fname = f'egts3d_l.{yr}.{date[4:8]}.{KNU_dir}.AQFv1.ncf'
        #try:
        ncfile = netcdf.NetCDFFile(
            os.path.join(curr_path, f'NIER_09h_EMIS_{date}', fname), 'r')
        var = list(ncfile.variables.keys())
        emiss_all = np.full((nr, nc, len(var) - 1, 24),
                            np.nan)  # From 01UTC to 00UTC (next day)

        for j in range(1, len(var)):
            temp = ncfile.variables[var[j]]
            data = copy.deepcopy(temp.data)
            # matlab ncread -> (174, 128, 15, 22)
            # python netcdf read -> (22, 15, 128, 174) = (TSTEP, LAY, ROW, COL)
            data = np.transpose(data, (3, 2, 1, 0))  # now, (174, 128, 15, 22)
            #ncfile.close()
            data = np.rot90(data)
            emiss_all[:, :, j - 1, 2:] = np.float64(
                np.squeeze(data[:, :, 0, :]))  # vertical layer : 1
def plot_P(inputs):
    """
    Plot the different percentages for each hour
    """
    V = inputs[0]  # variable
    m = inputs[1]  # map object
    h = inputs[2]  # hour (UTC)
    #
    variable = var_dict[V]['variable']
    var_name = var_dict[V]['name']
    var_title = var_dict[V]['title']
    var_units = var_dict[V]['units']
    vrange = var_dict[V]['range']
    cmap = var_dict[V]['cmap']
    offset = var_dict[V]['offset']
    #
    SAVE = '/uufs/chpc.utah.edu/common/home/u0553130/public_html/PhD/HRRR/percentile/%s/' % (
        var_name)
    if not os.path.exists(SAVE):
        # make the SAVE directory
        os.makedirs(SAVE)
        # then link the photo viewer
        photo_viewer = '/uufs/chpc.utah.edu/common/home/u0553130/public_html/Brian_Blaylock/photo_viewer/photo_viewer.php'
        os.link(photo_viewer, SAVE + 'photo_viewer.php')
    #
    #nc = netcdf.NetCDFFile('MP_MaxMinMean_hourly_'+var_name+'.nc', 'r')
    nc = netcdf.NetCDFFile(
        '/uufs/chpc.utah.edu/common/home/horel-group/archive/HRRR/MaxMinMeanPerc_v1_20150418-20170331/'
        + var_name + '.nc', 'r')

    Ps = [1, 5, 10, 90, 95, 99]
    lat = nc.variables['latitude'].data
    lon = nc.variables['longitude'].data
    x, y = m(lon, lat)
    #
    cb_ranges = True
    #
    for i in range(len(Ps)):
        plt.figure(i + h)
        plt.clf()
        plt.cla()
        m.drawcoastlines()
        m.drawcountries()
        m.drawstates()
        if V == "mslp":
            P = nc.variables['percentile'][i, :, :, h] / 100
        else:
            P = nc.variables['percentile'][i, :, :, h] - offset
        if cb_ranges is True:
            m.pcolormesh(x, y, P, cmap=cmap, vmin=vrange[0], vmax=vrange[1])
        else:
            m.pcolormesh(x, y, P, cmap=cmap)
        plt.title('HRRR %s %02d percentile (UTC: %02d)' %
                  (var_title, Ps[i], h))
        cb = plt.colorbar(orientation='horizontal', shrink=.9, pad=.05)
        cb.set_label('%s %s' % (var_title, var_units))
        plt.xlabel(str(DATES[0]) + ' - ' + str(DATES[-1]))
        print 'Hour:', h, 'Percentile:', Ps[i]
        plt.savefig(SAVE + 'hrrr_%s_%02dth_h%02d.png' % (var_name, Ps[i], h),
                    bbox_inches='tight',
                    dpi=500)
Example #17
def main():
    folderMain = os.path.dirname(os.path.realpath(__file__))
    folderData = folderMain
    data = netcdf.NetCDFFile(os.path.join(folderData, "mov0235_ALL_01-_.nc"),
                             'r')

    # Time
    t = data.variables["TIME"][:] * 1

    # Range of x, y and z values
    x = data.variables["X"][:] * 1
    y = data.variables["Y"][:] * 1
    z = data.variables["Z"][:] * 1

    # Range of x, y, and z values as vectors
    # E.g. [0, 0, 20] and [0, 0, 40]
    x2 = data.variables["X2"][:] * 1
    y2 = data.variables["Y2"][:] * 1
    z2 = data.variables["Z2"][:] * 1

    # Velocities
    u = data.variables["U"][:] * 1
    v = data.variables["V"][:] * 1
    w = data.variables["W"][:] * 1

    # Potential temperature
    theta = data.variables["THETA"][:] * 1

    # Moisture variables
    q1 = data.variables["Q01"][:] * 1  # Vapour, qv (Specific humidity)
    q2 = data.variables["Q02"][:] * 1  # Liquid, ql (Specific humidity)
    q3 = data.variables[
        "Q03"][:] * 1  # Radiative tracer, q (timescale, tau=15min)
    q4 = data.variables[
        "Q04"][:] * 1  # Radiative tracer for deep convection, q (timescale, tau=35min)

    for variable in data.variables:
        print variable

    data.close()

    print "t: {}".format(len(t))
    print t

    # print "x: {}".format(len(x))
    # print x

    # print "y: {}".format(len(y))
    # print y

    # print "z: {}".format(len(z))
    # print z

    # print "x2: {}".format(len(x2))
    # print x2

    # print "y2: {}".format(len(y2))
    # print y2

    # print "z2: {}".format(len(z2))
    # print z2

    print "u: {}".format(len(u[0][0][0]))
    print "v: {}".format(len(v))
    print "w: {}".format(len(w))

    print "Moisture and tracers:"
    print q1[0][100][100]
    print q2[0][100][100]
    print q3[0][100][100]
    print q4[0][100][100]
def calc_stats():
    for V in var_dict:

        variable = var_dict[V]['variable']
        var_name = var_dict[V]['name'] + '_TEST'
        var_title = var_dict[V]['title']
        var_units = var_dict[V]['units']
        vrange = var_dict[V]['range']
        cmap = var_dict[V]['cmap']
        offset = var_dict[V]['offset']

        print "\n", variable, var_name, var_title, var_units

        # The NetCDF file we want to create hasn't been made yet
        created_NC = False

        # multiprocessing :)
        cpu_count = multiprocessing.cpu_count() - 1

        # Hours to get
        hours = range(24)

        timer_hours = []
        timer_chunks = []
        timer_dwnld = []
        timer_numpy = []
        mem_percent = []
        mem_used = []
        size_result = []

        for h in hours:
            try:
                del sumH
                del maxH
                del minH
                del perH
            except:
                pass

            print "\nWork on hour:", h
            mem_percent.append(psutil.virtual_memory().percent)
            mem_used.append(psutil.virtual_memory().free)
            timerH = datetime.now()
            # Initialize the arrays with the first date (if you don't copy the
            # variable then the values are shared between max and min
            # for some pythonic reason)
            firstDATE = DATES[0]
            H = get_HRRR(
                datetime(firstDATE.year, firstDATE.month, firstDATE.day, h))
            maxH = H['value'].copy()
            minH = H['value'].copy()
            sumH = H['value'].copy()
            count = 1

            # Create the NetCDF file if it hasn't been created yet
            if created_NC is False:
                f = netcdf.NetCDFFile(
                    'MP_MaxMinMean_hourly_' + var_name + '.nc', 'w')
                f.createDimension('x', np.shape(H['value'])[0])
                f.createDimension('y', np.shape(H['value'])[1])
                f.createDimension('t', 24)  # Hours
                f.createDimension('d', 1)  # Date
                f.createDimension('p', 6)  # Percentile categories
                nc_maxH = f.createVariable('max_' + variable, float,
                                           ('x', 'y', 't'))
                nc_minH = f.createVariable('min_' + variable, float,
                                           ('x', 'y', 't'))
                nc_meanH = f.createVariable('mean_' + variable, float,
                                            ('x', 'y', 't'))
                nc_count = f.createVariable('count', 'i', ('t'))
                nc_perC = f.createVariable('percent_compute', 'i', ('p'))
                nc_perH = f.createVariable('percentile', float,
                                           ('p', 'x', 'y', 't'))
                created_NC = True

            # Process DATES in chunks with multiprocessing. Each chunk of DATES will
            # be processed on the number of processors we are allowed.
            # We do this in chunks so we don't return all the data at once, loading
            # everything into memory. So just load arrays from each chunk, thus saving
            # memory :) The majority of the time processing is in downloading, anyways,
            # not the numpy functions.
            # ## Still need an efficient way to compute percentiles (90th, 95th, 99th)
            # If you have lots of memory, go ahead and set i_have_lots_of_memory to
            # more than 1, else, set it to 1, which chunks the data based on the num of
            # processors you are allowing.
            # Number of days in a chunk = i_have_lots_of_memory * cpu_count
            i_have_lots_of_memory = 24
            chunks = range(len(DATES))[1::cpu_count * i_have_lots_of_memory]
            chunks.append(len(DATES))
            chunks = [0, len(DATES)]
            for i in range(len(chunks) - 1):
                timerC = datetime.now()

                # The chunk of dates the processors will work on for this loop
                chunk_DATES = DATES[chunks[i]:chunks[i + 1]]

                # Add the hour to each date, and pass datetime object to multipro
                chunk_DATETIMES = [
                    datetime(D.year, D.month, D.day, h) for D in chunk_DATES
                ]
                timerD = datetime.now()
                p = multiprocessing.Pool(cpu_count)
                result = p.map(get_HRRR_value, chunk_DATETIMES)
                print "got multiprocessing data...",
                p.close()
                print "closed processors"
                timer_dwnld.append(datetime.now() - timerD)

                # Remove empty arrays if any exist
                print "remove empty arrays...",
                #empty = [e for e in range(len(result)) if result[e] is None]
                #if len(empty) > 0:
                #    offset = range(len(empty))
                #    for E in range(len(empty)):
                #        # adjust by the offset, because pop changes index number
                #        result.pop(empty[E]-offset[E])
                result = [x for x in result if x is not None]
                print "done!"

                print "convert result to numpy array...",
                result = np.array(result)
                size_result.append(result.nbytes)
                print "done!"

                timerN = datetime.now()
                # Percentiles, if the entire hour was calculated in the same results
                if len(chunk_DATES) >= len(DATES):
                    timerP = datetime.now()
                    # remember to consider the first hour, currently in minH/maxH/sumH
                    print "calculate percentiles..."
                    ALL = np.vstack([[minH], result])
                    percentiles = [1, 5, 10, 90, 95, 99]
                    perH = np.percentile(ALL, percentiles, axis=0)
                    print 'done!'
                    timeP = datetime.now() - timerP

                # Use numpy arrays to find max, min, sum
                # First find min, max, sum of the result array
                minR = np.min(result, axis=0)
                maxR = np.max(result, axis=0)
                sumR = np.sum(result, axis=0)

                # Then, combine the result array to the previous (this two step
                # process is faster than doing a np.dstack before finding min/max/sum)
                minH = np.min([minR, minH], axis=0)
                maxH = np.max([maxR, maxH], axis=0)
                sumH = np.sum([sumR, sumH], axis=0)
                count += np.shape(result)[0]
                timer_numpy.append(datetime.now() - timerN)
                timer_chunks.append(datetime.now() - timerC)

            nc_maxH[:, :, h] = maxH
            nc_minH[:, :, h] = minH
            nc_meanH[:, :, h] = sumH / count
            nc_count[h] = count
            if 'perH' in locals().keys():
                nc_perC[:] = percentiles
                nc_perH[:, :, :, h] = perH

            timer_hours.append(datetime.now() - timerH)

        f.history = 'HRRR Hourly Max/Min/Mean Climatology for ' + variable

        latH = f.createVariable('latitude', float, ('x', 'y'))
        lonH = f.createVariable('longitude', float, ('x', 'y'))
        latH[:] = H['lat']
        lonH[:] = H['lon']

        begD = f.createVariable('Begin Date', 'i', ('d'))
        endD = f.createVariable('End Date', 'i', ('d'))
        begD[:] = int(DATES[0].strftime('%Y%m%d%H'))
        endD[:] = int(DATES[-1].strftime('%Y%m%d%H'))

        f.close()

        print "==========================================================="
        print "total time:", datetime.now() - timer1
        print ""
        print "mean hour (seconds):", np.mean(
            [i.seconds + i.microseconds / 1000000. for i in timer_hours])
        print "mean chunk (seconds):", np.mean(
            [i.seconds + i.microseconds / 1000000. for i in timer_chunks])
        print "mean dwnld (seconds):", np.mean(
            [i.seconds + i.microseconds / 1000000. for i in timer_dwnld])
        print "mean numpy (seconds):", np.mean(
            [i.seconds + i.microseconds / 1000000. for i in timer_numpy])
    """!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def main():
    args = argumentParser()
    #should be removed in the normal version
    #args.Input_Format, args.Scale_Factor, args.Bit_Depth, args.Image_Dimension, args.Output_Format, args.Use_CPU, args.Use_GAN='.mat', '16', 'uint8', '3D', '.tif', 'yes', 'yes'
    #flatFlag, restore, args.Input_Images, numBits = None, None, './srtestfolder/', 0.0

    cpuFlag = args.Use_CPU
    if cpuFlag == 'yes':
        cpuFlag = True
    else:
        cpuFlag = False

    ganFlag = args.Use_GAN
    if ganFlag == 'yes':
        ganFlag = True
    else:
        ganFlag = False
    datasetBitDepth = 'uint8'
    #    datasetBitDepth=args.Bit_Depth
    dim = args.Model_Type
    patchFlag = args.Use_3D_Patches
    if patchFlag == 'yes':
        patchFlag = True
    else:
        patchFlag = False

#    photoFlag=args.Use_PhotoSR
#    if photoFlag== 'yes':
#        photoFlag=True
#    else:
#        photoFlag=False
    '''
    TESTING
    '''

    # argument parsing
    #    if testFlag:
    #        ganFlag=False

    outputFlag = args.Output_Format
    finalScaleFactor = str2int(args.Scale_Factor)

    Scale_Factor = 4

    numFilters = 64
    activation = 'prelu'
    residual_blocks = 16
    batchNorm = False
    if datasetBitDepth == 'uint8':
        numBits = 127.5

    if dim == '2D Rock':
        flatFlag = True
        if ganFlag:
            restore = './validatedCheckpoints/SRGAN2DRock.ckpt'
        else:
            restore = './validatedCheckpoints/SRCNN2DRock.ckpt'

    if dim == '2D Augmented Rock':
        flatFlag = True
        if ganFlag:
            restore = './validatedCheckpoints/SRGAN2DAugRock.ckpt'
        else:
            restore = './validatedCheckpoints/SRCNN2DAugRock.ckpt'

    if dim == '2D Photo':
        flatFlag = True
        if ganFlag:
            restore = './validatedCheckpoints/SRGAN2DRock.ckpt'
        else:
            restore = './validatedCheckpoints/photoSR.ckpt'
    elif dim == '3D Rock':
        flatFlag = False
        if ganFlag:
            restore = './validatedCheckpoints/SRCNN3DRock.ckpt'
        else:
            restore = './validatedCheckpoints/SRCNN3DRock.ckpt'
            #restore=args.Network_Weights

    # define the models
    if flatFlag:
        outputShape = [None, None, None, 3]  #(1, Nx, Ny, 3)RGB
    else:
        outputShape = [None, None, None, None, 1]  #(1, Nx, Ny, Nz, 1)
    # the ground truth image tensor
    t_target_image = tf.placeholder('float32',
                                    outputShape,
                                    name='t_HR_target_image')
    if flatFlag:  # the generator is always fully convolutional
        inputShape = [None, None, None, 3]
    else:
        inputShape = [None, None, None, None, 1]
    inputTensor = tf.placeholder('float32',
                                 inputShape,
                                 name='t_LR_image_input_to_SR_generator')
    #TODO: add option for argument to handle different models
    # pass to the generator model
    if flatFlag:  # 2D EDSR model (built on TL)
        net_gen = generator2D(inputTensor,
                              Scale_Factor,
                              residual_blocks,
                              numFilters,
                              3,
                              initKernelFactor=1,
                              activation=activation,
                              batchNorm=batchNorm,
                              reuse=False)
        net_gen_data = net_gen
        net_gen = net_gen_data.outputs
    else:  #3d models
        net_gen = generatorTF(inputTensor, 3, residual_blocks, 64,
                              Scale_Factor, False, True)

    #g_vars = tl.layers.get_variables_with_name('SRGAN_g', True, True)

    g_vars = [var for var in tf.trainable_variables() if 'SRGAN_g' in var.name]
    # Resources
    if cpuFlag:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    else:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 1
    session = tf.Session(config=config)
    session.run(tf.initialize_all_variables())
    # load weights
    # pdb.set_trace()
    saver = tf.train.Saver(g_vars, max_to_keep=10000)
    if restore is not None:
        saver.restore(session, restore)
    else:
        print('No Checkpoints Requested')
    outdir = args.Input_Images + '/srOutputs'
    os.makedirs(outdir, exist_ok=True)
    # detect the files within the folder % accept png, tiff, and nc files.
    files = glob.glob(os.path.join(args.Input_Images, '*'), recursive=True)

    for fname in files:
        if os.path.isfile(fname):
            print(fname)
            # find the extension
            folder, name = os.path.split(fname)
            name, ext = os.path.splitext(name)

            # load the file
            print('Loading File ' + fname)

            if ext == '.png' or ext == '.jpg' or ext == '.jpeg':
                lr = np.array(Image.open(fname).convert('RGB'),
                              dtype=datasetBitDepth)
                #if lr.mode != 'RGB':
                #lr = lr.convert('RGB')
                #lr = np.array(lr, dtype=datasetBitDepth)# (Nx, Ny, 3 or 1), dtype = uint8
                if len(lr.shape) == 2:  # if 2D, make into RGB
                    lr = np.concatenate(
                        (lr, lr, lr),
                        2)  # just make all images in the same format
                if lr.shape[2] == 3:  # if RGB, make into BHWC
                    lr = np.expand_dims(lr, 0)  #2D shape = (1, )
                if not flatFlag:  #3D if 3F, make into BHWDC
                    lr = np.expand_dims(np.expand_dims(lr, 3), 0)
                    print(
                        'WARNING: Attempting to super resolve a 2D image with 3D network'
                    )
                #lr = lr[:,0:100, 0:100, :]

            elif ext == '.mat':

                f = h5py.File(fname)
                for k, v in f.items():
                    lr = np.array(v)
                if len(lr.shape) == 2:
                    lr = np.concatenate(
                        (lr, lr, lr),
                        2)  # just make all images in the same format
                if lr.shape[2] == 3:
                    lr = np.expand_dims(lr, 0)
                else:
                    lr = np.expand_dims(np.expand_dims(lr, 3), 0)

            elif ext == '.nc':
                file2read = netcdf.NetCDFFile(fname, 'r')
                temp = file2read.variables['tomo']
                lr = temp[:]
                file2read.close()
                lr = np.expand_dims(np.expand_dims(lr, 3), 0)  #2D and 3D

            elif ext == '.tif':
                lr = imread(fname)
                if len(lr.shape) == 2:  #2D and 3D
                    lr = np.concatenate(
                        (lr, lr, lr),
                        2)  # just make all images in the same format
                if lr.shape[2] == 3:
                    lr = np.expand_dims(lr, 0)
                else:
                    lr = np.expand_dims(np.expand_dims(lr, 3), 0)

            # run contrast adjustment assuming inscribed cylindrical core sample.
            if lr.max() > 255 or lr.min() < 0.0:

                lr = np.array(lr, dtype='float32')
                minVal = lr.min()
                maxVal = lr.max()
                nx = lr.shape[1]
                ny = lr.shape[2]
                truncMax = np.quantile(
                    lr[:,
                       int(nx * 0.2):int(nx * 0.8),
                       int(ny * 0.2):int(ny * 0.8), :], 0.99)
                truncMin = np.quantile(
                    lr[:,
                       int(nx * 0.2):int(nx * 0.8),
                       int(ny * 0.2):int(ny * 0.8), :], 0.01)
                print(
                    'WARNING: image is not within network bounds, performing automatic contrast adjustment (assuming inscribed cylinder) to '
                    + str(truncMax) + ' - ' + str(truncMin))
                lr[lr < truncMin] = truncMin
                lr[lr > truncMax] = truncMax

                lr = (lr - truncMin) / (truncMax - truncMin) * 255
            # generate SR
            lr = np.array(lr, dtype='float32')
            print('Super Resolving File ' + fname)
            lr = (lr - numBits) / numBits
            numLoops = int(np.log(finalScaleFactor) / np.log(Scale_Factor))
            print(f'Super resolution 4x will run for {numLoops} iterations')
            for n in range(numLoops):
                if flatFlag and lr.shape[3] > 3:
                    print(
                        'Performing Pseudo Super Resolution on 3D image with 2D network'
                    )
                    lr = np.transpose(lr, (0, 3, 2, 1, 4))
                    temp = np.zeros(
                        (lr.shape[0], lr.shape[1] * Scale_Factor,
                         lr.shape[2] * Scale_Factor, lr.shape[3], lr.shape[4]))

                    for i in range(lr.shape[3]):
                        print("\rSuper Resolving Slice %d" % (i + 1))
                        lrSlice = lr[:, :, :, i, :]
                        lrSlice = np.squeeze(lrSlice)
                        lrSlice = np.expand_dims(lrSlice, 2)
                        lrSlice = np.expand_dims(
                            np.concatenate((lrSlice, lrSlice, lrSlice), 2), 0)
                        sr = session.run(net_gen, {inputTensor: lrSlice})
                        sr = (sr + 1) * numBits
                        temp[:, :, :, i, :] = np.expand_dims(sr[:, :, :, 0], 3)
#                    stdout.flush()
#                    stdout.write("\n")
                    temp = np.transpose(temp, (0, 3, 2, 1, 4))
                    temp2 = np.zeros((temp.shape[0], temp.shape[1],
                                      temp.shape[2] // Scale_Factor,
                                      temp.shape[3], temp.shape[4]))
                    for i in range(temp.shape[3]):
                        print("\rCompressing Slice %d" % (i + 1))
                        compSlice = np.array(
                            Image.fromarray(np.squeeze(
                                temp[:, :, :, i, :])).resize(
                                    (temp.shape[1],
                                     temp.shape[2] // Scale_Factor),
                                    PIL.Image.BICUBIC))
                        temp2[:, :, :,
                              i] = np.expand_dims(np.expand_dims(compSlice, 0),
                                                  3)
#                    stdout.flush()
#                    stdout.write("\n")
                    del temp
                    temp2 = (temp2 - numBits) / numBits
                    sr3 = np.zeros((lr.shape[0], lr.shape[1] * Scale_Factor,
                                    lr.shape[2] * Scale_Factor,
                                    lr.shape[3] * Scale_Factor, lr.shape[4]))
                    for i in range(lr.shape[3] * Scale_Factor):
                        print("\rSuper Resolving Orthogonal Slice %d" %
                              (i + 1))
                        lrSlice = temp2[:, :, :, i, :]
                        lrSlice = np.squeeze(lrSlice)
                        lrSlice = np.expand_dims(lrSlice, 2)
                        lrSlice = np.expand_dims(
                            np.concatenate((lrSlice, lrSlice, lrSlice), 2), 0)
                        sr = session.run(net_gen, {inputTensor: lrSlice})
                        sr = (sr + 1) * numBits
                        sr3[:, :, :, i, :] = np.expand_dims(sr[:, :, :, 0], 3)
#                    stdout.flush()
#                    stdout.write("\n")
                    sr = sr3
                    del temp2

                elif not flatFlag and patchFlag:
                    print('Performing patch-based 3D super resolution')
                    print(f'The shape of lower resolution is {lr.shape}')
                    patchSize = 16
                    print(
                        f'Super resolution will be applied to patches of size {patchSize}'
                    )
                    numsplitsX = lr.shape[1] // patchSize  #16
                    numsplitsY = lr.shape[2] // patchSize  #16
                    numsplitsZ = lr.shape[3] // patchSize  #16

                    lr = lr[:, 0:patchSize * numsplitsX,
                            0:patchSize * numsplitsY,
                            0:patchSize * numsplitsZ, :]
                    print(f'LR image truncated to {lr.shape}')
                    #plt.figure(1)
                    #plt.imshow(np.squeeze(lr)[:,:,0])

                    lr = np.split(lr, numsplitsX, 1)
                    lr = np.vstack(lr)

                    lr = np.split(lr, numsplitsY, 2)
                    lr = np.vstack(lr)

                    lr = np.split(lr, numsplitsZ, 3)
                    lr = np.vstack(lr)  #(4096, 16, 16, 16, 1)

                    lr_list = list(lr)
                    for i in range(len(lr_list)):
                        tmp = np.expand_dims(lr_list[i],
                                             0)  #(1, 16, 16, 16, 1)
                        #pdb.set_trace()
                        #tmp = tmp.repeat(4, axis = 0).repeat(4, axis = 1).repeat(4, axis = 2)
                        sr = session.run(net_gen, {inputTensor: tmp})
                        sr = (sr + 1) * numBits
                        lr_list[i] = sr
                    sr = np.array(lr_list)  #4096
                    #print(sr.shape)
                    sr = np.split(sr, numsplitsZ, 0)
                    sr = np.concatenate(sr, 4)

                    sr = np.split(sr, numsplitsY, 0)
                    sr = np.concatenate(sr, 3)

                    sr = np.split(sr, numsplitsX, 0)
                    sr = np.concatenate(sr, 2)

                    sr = np.squeeze(sr)

                    #plt.figure(2)
                    #plt.imshow(sr[:,:,0])

                else:
                    if not flatFlag:
                        print('Performing True 3D super Resolution')
                        sr = session.run(net_gen, {inputTensor: lr})
                        sr = (sr + 1) * numBits
                    else:
                        print('Performing 2D super Resolution')
                        sr = session.run(net_gen, {inputTensor: lr})
                        sr = (sr + 1) * numBits
                if n + 1 < numLoops:
                    lr = (sr - numBits) / numBits
            # save
            if outputFlag == 'same':
                outputFormat = ext
            else:
                outputFormat = outputFlag
            #output format conversion
            if outputFormat == '.png':
                #2D only
                #sr=sr.reshape(sr.shape[1],sr.shape[2], 3)
                sr = np.squeeze(sr)
                sr = sr.astype('uint8')
                sr = Image.fromarray(sr)
                sr.save(outdir + '/' + name + '-sr.png')

            elif outputFormat == '.jpg' or outputFormat == '.jpeg':
                #2D only
                sr = np.squeeze(sr)
                sr = sr.astype('uint8')
                sr = Image.fromarray(sr)
                sr.save(outdir + '/' + name + '-sr.jpg')

            elif outputFormat == '.mat':
                #for 2D and 3D
                #sr=sr.reshape(sr.shape[1],sr.shape[2], sr.shape[3])
                sr = np.squeeze(sr)
                matfile = h5py.File(outdir + '/' + name + '-sr.mat', 'w')
                matfile.create_dataset('data', data=sr)

            elif outputFormat == '.nc':
                #for 2D and 3D
                #sr=sr.reshape(sr.shape[1],sr.shape[2], sr.shape[3])
                sr = np.squeeze(sr)
                ncfile = netcdf.netcdf_file(outdir + '/' + name + '-sr.nc',
                                            'w')
                ncfile.createDimension('sr', len(sr))
                #not very sure about the data type, uint8 maybe will be changed
                srvar = ncfile.createVariable('sr', 'uint8', ('sr', ))
                ncfile.close()

            elif outputFormat == '.tif':
                #sr = sr.astype(datasetBitDepth)
                #sr = np.array(np.squeeze(sr.astype('int')))
                head, tail = os.path.split(fname)
                name, ext = os.path.splitext(tail)
                imsave(
                    f'{outdir}/{name}-sr.tif',
                    np.array(np.squeeze(sr.astype('int')),
                             dtype=datasetBitDepth))
Example #20
r_error = 0
rg_cum = 0
rg_error = 0
b_obs_ens_1d = 0
b_obs_error = 0
covar_1_ens = 0
covar_1_error = 0
cpr = 1.82
al = 1.5306333
be = 1.213115524
epsilonr = 0.1
k_bT = 1
slicer = 5  #Initial samples to be discarded
trajt = 50
for traj in range(trajt):
    f = netcdf.NetCDFFile('net_%s.nc' % (traj + 101), 'r', mmap=False)
    #########################################################
    #####   READING FROM NetCDF
    #########################################################

    Nsample = f.dimensions['No_of_samples']
    print Nsample
    Ndim = f.dimensions['Ndim']
    NBeads = f.dimensions['NBeads']
    timenet = f.variables['Time']
    time = timenet[:] * 1
    confinet = f.variables['configuration']
    confi_net = confinet[:] * 1
    confi = confi_net[:, :, slicer:Nsample]
    Nsample = Nsample - slicer
    #  gradnet = f.variables['Gradient']
Example #21
def read_nc_Eff_Fr(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    Eff_Fr = rootgrp.variables['FreqGroupIono'].data
    rootgrp.close()
    return Eff_Fr
Example #22
# station details
good_lat = np.array([54.3715, 54.9200, 54.8830, 54.5150, 54.9000, 54.6711])
good_lon = np.array([-155.0717, -155.2550, -155.9170, -156.2500, -157.3670,
    -157.4156])
good_name = ['LA21', 'LA23', 'LA25', 'LA26', 'LA28', 'LA30']

bad_lat = np.array([53.9855, 54.2920])
bad_lon = np.array([-156.6320, -157.3670])
bad_name = ['LA27', 'LA29']

miss_lat = np.array([54.5674])
miss_lon = np.array([-160.2019])
miss_name = ['LT17']

# read .nc file
file2read = netcdf.NetCDFFile('../EffComp_data/AACSE_etopo1_bedrock.nc','r')
temp = file2read.variables['lat']
lat = temp[:]*1
temp = file2read.variables['lon']
lon = temp[:]*1
temp = file2read.variables['Band1']
elev = temp[:]*1

# plotting
XX, YY = np.meshgrid(lon,lat)

# PLOTTING
fs = 14 # primary fontsize
lw = 3 # primary linewidth
mk = 10 # primary markersize
plt.close('all')
Example #23
# -*- coding: utf-8 -*-

from scipy.io import netcdf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt

file2read = netcdf.NetCDFFile('test1.hdf', 'r')
temp = file2read.variables['S']  # var can be 'Theta', 'S', 'V', 'U' etc..
data = temp[:] * 1
file2read.close()

t, z = 0, 0  # choose a time index and a vertical level to plot
plt.contourf(data[t, z, :, :])
plt.show()
Example #24
def read_nc_CR(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    obs2stat1_stat2 = rootgrp.variables['Obs2Baseline'].data
    obs2scan = rootgrp.variables['Obs2Scan'].data
    rootgrp.close()
    return obs2scan, obs2stat1_stat2
            try:
                new = H['value']
                sumH = sumH + new; count += 1
                maxH[new > maxH] = new[new > maxH]
                minH[new < minH] = new[new < minH]
            except:
                # maxH and minH variables are not assigned yet, so make those.
                # (if you don't copy the variable then the values are shared between max and min
                # for some pythonic reason)
                maxH = H['value'].copy()
                minH = H['value'].copy()
                sumH = H['value'].copy(); count = 1
                if created_NC == False:
                    # And Create NetCDF Dimensions and Variables
                    # Create NetCDF file
                    f = netcdf.NetCDFFile('MaxMinMean_hourly_'+var_name+'.nc', 'w')
                    f.createDimension('x', np.shape(H['value'])[0])
                    f.createDimension('y', np.shape(H['value'])[1])
                    f.createDimension('t', 24)
                    nc_maxH = f.createVariable('max_'+variable, float, ('x', 'y', 't'))
                    nc_minH = f.createVariable('min_'+variable, float, ('x', 'y', 't'))
                    nc_meanH = f.createVariable('mean_'+variable, float, ('x', 'y', 't'))
                    created_NC = True
        except:
            print "hour not available", DATE

    nc_maxH[:, :, h] = maxH
    nc_minH[:, :, h] = minH
    nc_meanH[:, :, h] = sumH/count
    del maxH
    del minH
Example #26
def write_to_netcdf(filename, tmpfilename, newpreciparray, new_idates,
                    old_idates, all_idates, cid, ens, missingval, fdat,
                    codepath):
    #convert idate lists to matlab datenumbers (days since jan 1 0000)
    oldidns = [float] * len(old_idates)
    newidns = [float] * len(new_idates)
    sdn_out = [float] * len(all_idates)
    edn_out = [float] * len(all_idates)
    idn_out = [float] * len(all_idates)
    for i, idat in enumerate(old_idates):
        oldidns[i] = grid_utils.datetime2matlabdn(idat)
    for i, idat in enumerate(new_idates):
        newidns[i] = grid_utils.datetime2matlabdn(idat)
    for i, idat in enumerate(all_idates):
        f = definitions.forecast_def(idat, fdat, 0)
        sdn_out[i] = grid_utils.datetime2matlabdn(f['sdate'])
        edn_out[i] = grid_utils.datetime2matlabdn(f['edate'])
        idn_out[i] = grid_utils.datetime2matlabdn(f['idate'])

    if os.path.isfile(filename):
        #open netcdf file
        f = S.NetCDFFile(filename, "r")

        #read netcdf data to append
        pcp = f.variables["Forecast Precip"][:]
        idns = f.variables["InitializationDate"][:]

        #Get values from original netcdf file that are not missing
        vals2retain = np.in1d(idns, oldidns)
        pcpold = np.squeeze(pcp[vals2retain, :, :])

        #Concatenate all the data together, then sort based on the initialization date
        pcpall = np.concatenate((pcpold, newpreciparray), 0)
        idnall = np.concatenate((oldidns, newidns), 0)
        pcp_out = pcpall[np.argsort(idnall), :, :]
        idn_out2 = np.sort(idnall)
        f.close()

        f2 = S.NetCDFFile(tmpfilename, mode='w')
        f2.createDimension('Number of Catchments', len(cid))
        f2.createDimension('Number of Ensemble Members', len(ens))
        f2.createDimension('Days', len(idn_out))
        fp = f2.createVariable(
            'Forecast Precip', 'f',
            ('Days', 'Number of Ensemble Members', 'Number of Catchments'))
        fcid = f2.createVariable('Catchment ID', 'i',
                                 ('Number of Catchments', ))
        fsd = f2.createVariable('StartDate', 'f', ('Days', ))
        fid = f2.createVariable('InitializationDate', 'f', ('Days', ))
        fed = f2.createVariable('EndDate', 'f', ('Days', ))
        fens = f2.createVariable('Ensembles', 'i',
                                 ('Number of Ensemble Members', ))
        #Add data
        fp[:] = pcp_out[:]
        fcid[:] = cid[:]
        fsd[:] = sdn_out[:]
        fid[:] = idn_out[:]
        fed[:] = edn_out[:]
        fens[:] = ens[:]
        f2.close()
        os.rename(tmpfilename, filename)

    else:
        f = S.NetCDFFile(filename, mode='w')
        f.createDimension('Number of Catchments', len(cid))
        f.createDimension('Number of Ensemble Members', len(ens))
        f.createDimension('Days', len(idn_out))
        fp = f.createVariable(
            'Forecast Precip', 'f',
            ('Days', 'Number of Ensemble Members', 'Number of Catchments'))
        fcid = f.createVariable('Catchment ID', 'i',
                                ('Number of Catchments', ))
        fsd = f.createVariable('StartDate', 'f', ('Days', ))
        fid = f.createVariable('InitializationDate', 'f', ('Days', ))
        fed = f.createVariable('EndDate', 'f', ('Days', ))
        fens = f.createVariable('Ensembles', 'i',
                                ('Number of Ensemble Members', ))
        #Add data
        fp[:] = newpreciparray[:]
        fcid[:] = cid[:]
        fsd[:] = sdn_out[:]
        fid[:] = idn_out[:]
        fed[:] = edn_out[:]
        fens[:] = ens[:]
        f.close()
Example #27
    def save(self, filename, format='netcdf'):
        """Save a GMTGrid object to a file.
        :param filename:
          Name of desired output file.
        :param format:
          One of 'netcdf','hdf' or 'native'.
        :raises DataSetException:
          When format is not one of ('netcdf', 'hdf', 'native').
        """
        if format not in ['netcdf', 'hdf', 'native']:
            raise DataSetException(
                'Only NetCDF3, HDF (NetCDF4), and GMT native output are supported.'
            )
        if format == 'netcdf':
            f = netcdf.NetCDFFile(filename, 'w')
            m, n = self._data.shape
            dx = f.createDimension('x', n)
            dy = f.createDimension('y', m)
            x = f.createVariable('x', np.float64, ('x'))
            y = f.createVariable('y', np.float64, ('y'))
            x[:] = np.linspace(self._geodict.xmin, self._geodict.xmax,
                               self._geodict.nx)
            y[:] = np.linspace(self._geodict.ymin, self._geodict.ymax,
                               self._geodict.ny)
            z = f.createVariable('z', self._data.dtype, ('y', 'x'))
            z[:] = np.flipud(self._data)
            z.actual_range = np.array(
                (np.nanmin(self._data), np.nanmax(self._data)))
            f.close()
        elif format == 'hdf':
            #Create the file and the top-level attributes GMT wants to see
            f = h5py.File(filename, 'w')
            f.attrs['Conventions'] = 'COARDS, CF-1.5'
            f.attrs['title'] = 'filename'
            f.attrs[
                'history'] = 'Created with python GMTGrid.save(%s,format="hdf")' % filename
            f.attrs['GMT_version'] = 'NA'

            #Create the x array and the attributes that GMT wants to see
            xvar = np.linspace(self._geodict.xmin, self._geodict.xmax,
                               self._geodict.nx)
            x = f.create_dataset('x',
                                 data=xvar,
                                 shape=xvar.shape,
                                 dtype=str(xvar.dtype))
            x.attrs['CLASS'] = 'DIMENSION_SCALE'
            x.attrs['NAME'] = 'x'
            x.attrs['_Netcdf4Dimid'] = 0  #no idea what this is
            x.attrs['long_name'] = 'x'
            x.attrs['actual_range'] = np.array((xvar[0], xvar[-1]))

            #Create the y array and the attributes that GMT wants to see
            yvar = np.linspace(self._geodict.ymin, self._geodict.ymax,
                               self._geodict.ny)
            y = f.create_dataset('y',
                                 data=yvar,
                                 shape=yvar.shape,
                                 dtype=str(yvar.dtype))
            y.attrs['CLASS'] = 'DIMENSION_SCALE'
            y.attrs['NAME'] = 'y'
            y.attrs['_Netcdf4Dimid'] = 1  #no idea what this is
            y.attrs['long_name'] = 'y'
            y.attrs['actual_range'] = np.array((yvar[0], yvar[-1]))

            #create the z data set
            z = f.create_dataset('z',
                                 data=np.flipud(self._data),
                                 shape=self._data.shape,
                                 dtype=str(self._data.dtype))
            z.attrs['long_name'] = 'z'
            #zvar.attrs['_FillValue'] = array([ nan], dtype=float32)
            z.attrs['actual_range'] = np.array(
                (np.nanmin(self._data), np.nanmax(self._data)))

            #close the hdf file
            f.close()
        elif format == 'native':
            f = open(filename, 'wb')
            f.write(struct.pack('I', self._geodict.nx))
            f.write(struct.pack('I', self._geodict.ny))
            f.write(struct.pack('I', 0))  #gridline registration
            f.write(struct.pack('d', self._geodict.xmin))
            f.write(struct.pack('d', self._geodict.xmax))
            f.write(struct.pack('d', self._geodict.ymin))
            f.write(struct.pack('d', self._geodict.ymax))
            f.write(struct.pack('d', self._data.min()))
            f.write(struct.pack('d', self._data.max()))
            f.write(struct.pack('d', self._geodict.dx))
            f.write(struct.pack('d', self._geodict.dy))
            f.write(struct.pack('d', 1.0))  #scale factor to multiply data by
            f.write(struct.pack('d', 0.0))  #offset to add to data
            f.write(struct.pack('80s', b'X units (probably degrees)'))
            f.write(struct.pack('80s', b'Y units (probably degrees)'))
            f.write(struct.pack('80s', b'Z units unknown'))
            f.write(struct.pack('80s', b''))  #title
            f.write(
                struct.pack(
                    '320s',
                    b'Created with GMTGrid() class, a product of the NEIC.')
            )  #command
            f.write(struct.pack('160s', b''))  #remark
            if self._data.dtype not in [
                    np.int16, np.int32, np.float32, np.float64
            ]:
                msg = 'Data type of "%s" is not supported by the GMT native format.'
                raise DataSetException(msg % str(self._data.dtype))
            fpos1 = f.tell()
            #the left-right flip is necessary because of the way tofile() works
            newdata = np.fliplr(np.flipud(self._data[:]))
            newdata.tofile(f)
            fpos2 = f.tell()
            bytesout = fpos2 - fpos1
            f.close()
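For the 'native' branch above, a hedged sketch of reading the fixed-size header back with mirror-image struct.unpack calls (the field order and sizes are taken from the writes above, not from GMT documentation):

import struct

def read_gmt_native_header(filename):
    # Sketch: unpack the header fields written by the 'native' branch above.
    with open(filename, 'rb') as f:
        nx, ny, registration = struct.unpack('3I', f.read(12))
        (xmin, xmax, ymin, ymax, zmin, zmax,
         dx, dy, scale, offset) = struct.unpack('10d', f.read(80))
        x_units = struct.unpack('80s', f.read(80))[0].rstrip(b'\x00')
    return {'nx': nx, 'ny': ny, 'registration': registration,
            'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax,
            'dx': dx, 'dy': dy, 'x_units': x_units}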
Example #28
def read_nc_delay(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    group_delay = rootgrp.variables['GroupDelay'].data  # in sec
    group_delay_sig = rootgrp.variables['GroupDelaySig'].data  # in sec
    rootgrp.close()
    return group_delay, group_delay_sig
Example #29
             strip = True
             Plot_SupTitle = enterbox(msg, title,default,strip)
 
         # Now read NetCDF file and Plot the Radar Rainfall Array
         """
         data = NetCDFFile(filename, netcdf_mode_r)
         print 'VARIABLES:'
         print data.variables        
         The default format for BOM data is Lat Lon,
         
         """
         if pattern == '*.gz':
             #gzip.open(filename)
             filename = gzip.open(filename, 'rb')
             print(filename)
             data = netcdf.NetCDFFile(filename, 'r') # filename is now the opened gzip file object; RADAR NetCDF files have Dimensions, Attributes, Variables
         else:
             data = netcdf.NetCDFFile(os.path.join(root, filename), 'r') # RADAR NetCDF files have Dimensions, Attributes, Variables
         print('VARIABLES:')
         #print data.variables  
         #print data.__dict__
         print('Reference LAT, LONG = ',data.reference_longitude, data.reference_latitude)
         #print 'ATTRIBUTES:'
         #print data.attributes              
         #raw_input('Hold here... line 217')
         possible_precip_names = ['precipitation',  'precip', 'rain_amount']  # This handles format changes in the files from BOM !!!!
         # Go through each of the possible names
         for name in possible_precip_names:  # Check if name is a key in the variables dictionary
             if name in data.variables:
                 precip_name = name
                 print('BOM Reference name tag in this file:')
Example #30
def read_nc_delayfull(file):
    rootgrp = netcdf.NetCDFFile(file, "r")
    group_delay_full = rootgrp.variables['GroupDelayFull'].data  # in sec
    rootgrp.close()
    return group_delay_full