Example #1
import os

import numpy as np

from tonic.io import read_config, read_netcdf
from tonic.pycompat import pyzip
# write_binary and write_ascii are assumed to be defined in the same script


def _run(args):

    config = read_config(args.config)
    files = config["options"]["files"]
    var_keys = config["options"]["var_keys"]
    output = config["options"]["output"]
    binary_mult = config["options"]["binary_mult"]
    binary_type = config["options"]["binary_type"]
    paths = config["options"]["paths"]
    out_prefix = config["options"]["out_prefix"]
    verbose = config["options"]["verbose"]

    mask = read_netcdf(paths["mask_path"], nc_vars=["mask"])["mask"]
    yi, xi = np.nonzero(mask)
    print("found {0} points in mask file.".format(len(yi)))

    xlist = []
    ylist = []
    pointlist = []
    append = False

    for i, fname in enumerate(files):
        d = read_netcdf(os.path.join(paths["in_path"], fname), verbose=verbose)

        if i == 0:

            # find point locations
            xs = d["xc"]
            ys = d["yc"]
            posinds = np.nonzero(xs > 180)
            xs[posinds] -= 360
            print("adjusted xs lon minimum")

            for y, x in pyzip(yi, xi):
                active_flag = False
                for key in var_keys:
                    if (d[key][:, y, x].all() is np.ma.masked) or (mask[y, x] == 0):
                        active_flag = True
                if not active_flag:
                    point = (ys[y, x], xs[y, x])
                    xlist.append(x)
                    ylist.append(y)
                    pointlist.append(point)

        else:
            append = True

        for y, x, point in pyzip(ylist, xlist, pointlist):

            data = np.empty((d[var_keys[0]].shape[0], len(var_keys)))

            for j, key in enumerate(var_keys):
                data[:, j] = d[key][:, y, x]

            if output["Binary"]:
                write_binary(data * binary_mult, point, binary_type, out_prefix, paths["BinaryoutPath"], append)
            if output["ASCII"]:
                write_ascii(data, point, out_prefix, paths["ASCIIoutPath"], append)
    return
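The nested structure that `_run()` expects `read_config()` to return can be sketched as a plain dict. Only the key names below come from the reads above; the values, and the assumption that `read_config` yields nested dicts like this, are illustrative:

# A hedged sketch of the config consumed by _run(); all values are
# made-up placeholders, only the key names appear in the example above.
config = {
    "options": {
        "files": ["fluxes_1979.nc", "fluxes_1980.nc"],
        "var_keys": ["Precipitation", "Evaporation"],
        "output": {"Binary": False, "ASCII": True},
        "binary_mult": [40.0, 40.0],
        "binary_type": "HH",
        "out_prefix": "fluxes_",
        "verbose": True,
        "paths": {
            "mask_path": "domain.nc",
            "in_path": "input/",
            "BinaryoutPath": "out_bin/",
            "ASCIIoutPath": "out_ascii/",
        },
    },
}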
Example #2
def test_pyzip():
    a = [1, 2, 3]
    b = ['a', 'b', 'c']

    z = pyzip(a, b)
    assert hasattr(z, '__iter__')
    lz = list(pyzip(a, b))
    assert lz[0] == (1, 'a')
    assert len(lz) == len(a)  # == len(b)
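`pyzip` is tonic's Python 2/3 compatibility alias for a lazy `zip`, which is why the test checks for `__iter__` rather than for a list. A minimal sketch of such a shim (an assumption about the compat layer, not necessarily tonic's exact source):

import sys

if sys.version_info[0] >= 3:
    pyzip = zip  # the built-in zip is already a lazy iterator on Python 3
else:
    from itertools import izip as pyzip  # lazy equivalent on Python 2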
Example #3
import numpy as np

from tonic.pycompat import pyzip


def veg(data, xinds, yinds, veg_file, rootzones=3, global_lai=True):
    """Write VIC formatted veg parameter file"""

    print('writing veg parameter file: {0}'.format(veg_file))

    # counter for bad grid cells
    count = 0

    f = open(veg_file, 'w')

    for y, x in pyzip(yinds, xinds):
        gridcell = int(data['gridcell'][y, x])
        n_veg = int(data['Nveg'][y, x])
        cv = data['Cv'][:, y, x]
        veg_class = np.nonzero(cv)[0]

        if len(veg_class) != n_veg:
            count += 1

        line1 = str(gridcell) + ' ' + str(n_veg) + '\n'
        f.write(line1)
        if n_veg > 0:
            for j in veg_class:
                line2 = [str(j + 1)]
                line2.append(str(cv[j]))
                for k in range(rootzones):
                    line2.append(str(data['root_depth'][j, k, y, x]))
                    line2.append(str(data['root_fract'][j, k, y, x]))
                line2.append('\n')
                f.write(' '.join(line2))
                if global_lai:
                    line3 = []
                    for m in range(12):
                        line3.append(str(data['LAI'][j, m, y, x]))
                    line3.append('\n')
                    f.write(' '.join(line3))
    f.close()

    print('{0} grid cells have unequal veg_classes'.format(count))
    print('finished writing veg parameter file: {0}'.format(veg_file))

    return
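For reference, the block layout that `veg()` writes with the default `rootzones=3` and `global_lai=True`: a cell header `gridcell n_veg`, then per active vegetation class one line with the 1-based class number, its `Cv` fraction, and three `root_depth root_fract` pairs, followed by a line of twelve monthly LAI values. A hedged sample for one cell with two classes (all numbers made up):

10025 2
1 0.40 0.10 0.30 0.65 0.60 1.00 0.10
0.8 0.9 1.2 1.8 2.4 3.1 3.5 3.2 2.6 1.7 1.0 0.8
4 0.60 0.10 0.25 0.65 0.55 1.00 0.20
0.3 0.4 0.6 1.0 1.5 1.9 2.1 2.0 1.6 0.9 0.5 0.3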
Example #4
    def set_fileformat(self, fileformat):
        """sets and assigns fileformat specific attributes and methods"""

        if fileformat == 'ascii':
            delimeter = r'\t'  # VIC ascii files are tab separated
        else:
            delimeter = r','  # true csv

        for p in self:
            p.fileformat = fileformat
            if fileformat in ['ascii', 'csv']:
                p.open = p._open_ascii
                p.delimeter = delimeter
                p.read = p._read_ascii
            elif fileformat == 'binary':
                p.open = p._open_binary
                p.read = p._read_binary
                p.dt = np.dtype(list(pyzip(p.names, p.bin_dtypes)))
            elif fileformat == 'netcdf':
                p.open = p._open_netcdf
                p.read = p._read_netcdf
            else:
                raise ValueError('Unknown file format: {0}'.format(fileformat))
        return
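The `np.dtype(list(pyzip(p.names, p.bin_dtypes)))` call in the binary branch builds a NumPy structured dtype that pairs each output column with its on-disk type, so a whole record can be read in one go. A self-contained sketch of the same construction (the names and types here are illustrative assumptions):

import numpy as np

# Illustrative column names and binary dtypes; '<u2' is a little-endian
# unsigned 16-bit integer.
names = ['prcp', 'evap', 'swe']
bin_dtypes = ['<u2', '<u2', '<u2']

dt = np.dtype(list(zip(names, bin_dtypes)))
record = np.zeros(2, dtype=dt)   # two records, one field per column
print(dt.names)                  # ('prcp', 'evap', 'swe')
print(record['prcp'])            # [0 0]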
Example #5
def vic2nc(options, global_atts, domain_dict, fields):
    """ Convert ascii VIC files to netCDF format"""

    # determine run mode
    if (options['memory_mode'] == 'standard') \
            and (options['chunksize'] in ['all', 'All', 'ALL', 0]):
        memory_mode = 'big_memory'
    else:
        memory_mode = options['memory_mode']

    print("\n-------------------------------")
    print("Configuration File Options")
    print("-------------OPTIONS-------------")
    for pair in options.items():
        print("{0}: {1}".format(*pair))
    print('Fields: {0}'.format(", ".join(fields.keys())))
    if domain_dict:
        print("-------------DOMAIN--------------")
        for pair in domain_dict.items():
            print("{0}: {1}".format(*pair))
    print("--------GLOBAL_ATTRIBUTES--------")
    for pair in global_atts.items():
        print("{0}: {1}".format(*pair))
    print("--------RUN MODE--------")
    print('Memory Mode: {0}'.format(memory_mode))
    if memory_mode == 'standard':
        print('Chunksize={0}'.format(options['chunksize']))
    print("---------------------------------\n")
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Make output directory
    if not os.path.exists(options['out_directory']):
        os.makedirs(options['out_directory'])
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Make pairs (i.e. find inds)
    files = glob(options['input_files'])
    points = get_file_coords(files)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Get target grid information
    if domain_dict:
        domain = read_domain(domain_dict)
        target_grid_file = path.split(domain_dict['filename'])[1]
        global_atts['target_grid_file'] = target_grid_file
    else:
        # must be a regular grid, build from file names
        domain = calc_grid(points.get_lats(), points.get_lons())
        target_grid_file = None
        domain_dict = {'y_x_dims': ['lat', 'lon']}
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Get grid index locations
    points = get_grid_inds(domain, points)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Get timestamps
    if options['input_file_format'].lower() == 'ascii':
        if ('bin_start_date' in options
            and 'bin_end_date' in options
                and 'bin_dt_sec' in options):
            vic_datelist, vic_ordtime = make_dates(
                options['bin_start_date'],
                options['bin_end_date'],
                options['bin_dt_sec'],
                calendar=options['calendar'])
        else:
            vic_datelist = get_dates(files[0])
            vic_ordtime = date2num(vic_datelist, TIMEUNITS,
                                   calendar=options['calendar'])

    elif options['input_file_format'].lower() in ['binary', 'netcdf']:
        vic_datelist, vic_ordtime = make_dates(options['bin_start_date'],
                                               options['bin_end_date'],
                                               options['bin_dt_sec'],
                                               calendar=options['calendar'])

    else:
        raise ValueError('Unknown input file format: {0}. Valid options are '
                         'ascii, binary, or netcdf'
                         .format(options['input_file_format']))
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Determine time segmentation
    if options['start_date']:
        start_date = datetime.strptime(options['start_date'], TIMESTAMPFORM)
        if start_date < vic_datelist[0]:
            print("WARNING: Start date in configuration file is before "
                  "first date in file.")
            start_date = vic_datelist[0]
            print('WARNING: New start date is {0}'.format(start_date))
    else:
        start_date = vic_datelist[0]

    if options['end_date']:
        end_date = datetime.strptime(options['end_date'], TIMESTAMPFORM)
        if end_date > vic_datelist[-1]:
            print("WARNING: End date in configuration file is after "
                  "last date in file.")
            end_date = vic_datelist[-1]
            print('WARNING: New end date is {0}'.format(end_date))
    else:
        end_date = vic_datelist[-1]

    # Ordinal Time
    start_ord = date2num(start_date, TIMEUNITS, calendar=options['calendar'])
    end_ord = date2num(end_date, TIMEUNITS, calendar=options['calendar'])

    print("netCDF Start Date: {0}".format(start_date))
    print("netCDF End Date: {0}".format(end_date))

    segment_dates = []
    if options['time_segment'] == 'day':
        # calendar insensitive
        num_segments = int(np.ceil(end_ord - start_ord))
        if start_date.hour == 0:
            segment_dates = num2date(np.arange(start_ord, end_ord + 1, 1),
                                     TIMEUNITS, calendar=options['calendar'])
        else:
            # allow start at time other than 0
            temp = np.concatenate(([start_ord],
                                   np.arange(np.ceil(start_ord),
                                             end_ord + 1, 1)))
            segment_dates = num2date(temp, TIMEUNITS,
                                     calendar=options['calendar'])
    elif options['time_segment'] == 'month':
        num_segments = (end_date.year - start_date.year) * 12 \
            + end_date.month - start_date.month + 1
        month = start_date.month
        year = start_date.year
        for i in pyrange(num_segments + 1):
            segment_dates.append(datetime(year, month, 1))
            month += 1
            if month == 13:
                month = 1
                year += 1
    elif options['time_segment'] == 'year':
        num_segments = end_date.year - start_date.year + 1
        year = start_date.year
        for i in pyrange(num_segments + 1):
            segment_dates.append(datetime(year, 1, 1))
            year += 1
    elif options['time_segment'] == 'decade':
        num_segments = (end_date.year - start_date.year) // 10 + 1
        year = start_date.year
        for i in pyrange(num_segments + 1):
            segment_dates.append(datetime(year, 1, 1))
            year += 10
    elif options['time_segment'] == 'all':
        num_segments = 1
        segment_dates = [start_date, end_date]
    else:
        raise ValueError('Unknown time_segment option: '
                         '{0}'.format(options['time_segment']))
    print("Number of files: {0}".format(len(segment_dates) - 1))
    assert len(segment_dates) == num_segments + 1

    # Make sure the first and last dates are start/end_date
    segment_dates[0] = start_date
    segment_dates[-1] = end_date + timedelta(minutes=1)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Setup Segments
    segments = deque()

    for num in pyrange(num_segments):
        # Segment time bounds
        t0 = segment_dates[num]
        t1 = segment_dates[num + 1]

        # Get segment inds
        i0 = bisect_left(vic_datelist, t0)
        i1 = bisect_left(vic_datelist, t1)

        # Make segment filename (with path)
        if options['time_segment'] == 'day':
            filename = "{0}.{1}.nc".format(options['out_file_prefix'],
                                           t0.strftime('%Y-%m-%d'))
        elif options['time_segment'] == 'month':
            filename = "{0}.{1}.nc".format(options['out_file_prefix'],
                                           t0.strftime('%Y-%m'))
        elif options['time_segment'] == 'year':
            filename = "{0}.{1}.nc".format(options['out_file_prefix'],
                                           t0.strftime('%Y'))
        elif options['time_segment'] == 'decade':
            # decade segments are named by their starting year
            filename = "{0}.{1}.nc".format(options['out_file_prefix'],
                                           t0.strftime('%Y'))
        elif options['time_segment'] == 'all':
            filename = "{0}.{1}-{2}.nc".format(options['out_file_prefix'],
                                               t0.strftime('%Y%m%d'),
                                               t1.strftime('%Y%m%d'))

        filename = path.join(options['out_directory'], filename)

        # Setup segment and initialize netcdf
        segment = Segment(num, i0, i1, options['out_file_format'],
                          filename, memory_mode=memory_mode)
        segment.nc_globals(**global_atts)
        segment.nc_time(t0, t1, vic_ordtime, options['calendar'])
        segment.nc_dimensions(snow_bands=options['snow_bands'],
                              veg_tiles=options['veg_tiles'],
                              soil_layers=options['soil_layers'])

        segment.nc_domain(domain)
        segment.nc_fields(fields,
                          domain_dict['y_x_dims'], options['precision'])

        print(repr(segment))
        segments.append(segment)
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # Get column numbers and names (will help speed up reading)
    names = []
    usecols = []
    dtypes = []
    bin_dtypes = []
    bin_mults = []

    if options['precision'] == 'double':
        prec = NC_DOUBLE
    else:
        prec = NC_FLOAT

    for name, field in fields.items():

        if not np.isscalar(field['column']):
            # multiple levels
            for i, col in enumerate(field['column']):
                names.append(name + str(i))
                usecols.append(col)
            if 'type' in field:
                if isinstance(field['type'], list):
                    dtypes.extend(field['type'])
                else:
                    dtypes.extend([field['type']] * len(field['column']))
            else:
                dtypes.extend([prec] * len(field['column']))

            if options['input_file_format'].lower() == 'binary':
                if 'bin_dtype' in field:
                    if isinstance(field['bin_dtype'], list):
                        bin_dtypes.extend(field['bin_dtype'])
                    else:
                        bin_dtypes.extend([field['bin_dtype']] *
                                          len(field['column']))
                else:
                    raise ValueError('bin_dtype not in field: {}'.format(name))

                if 'bin_mult' in field:
                    if isinstance(field['bin_mult'], list):
                        bin_mults.extend(field['bin_mult'])
                    else:
                        bin_mults.extend([field['bin_mult']] *
                                         len(field['column']))
                else:
                    bin_mults.extend([1.0] * len(field['column']))
        else:
            # no levels
            names.append(name)
            usecols.append(field['column'])

            if 'type' in field:
                dtypes.append(field['type'])
            else:
                dtypes.append(prec)

            if options['input_file_format'].lower() == 'binary':
                if 'bin_dtype' in field:
                    bin_dtypes.append(field['bin_dtype'])
                else:
                    raise ValueError('bin_dtype not in field: {}'.format(name))

                if 'bin_mult' in field:
                    bin_mults.append(field['bin_mult'])
                else:
                    bin_mults.append(1.0)

    print('setting point attributes (fileformat, names, usecols, and dtypes)')
    # pandas.read_table does not 'honor' the order of the columns in usecols
    # it simply uses them in ascending order. So the names need to be sorted
    # the same way. For example, if the columns in the VIC file are:
    # 3: prcp; 4: evap; 5: runoff; 6; baseflow; 7: sm1; 8: sm2; 9: sm3; 10: swe
    # and this is parsed from the configuration file as
    # usecols = [3, 4, 5, 6, 10, 7, 8, 9]
    # names=['prcp', 'evap', 'runoff', 'baseflow', 'swe', 'sm1', 'sm2', 'sm3']
    # then without sorting, the netcdf file will have the wrong variables:
    # nc_swe will contain sm1, nc_sm1 will contain sm2, nc_sm2: sm3 and
    # nc_swe: sm3
    # the following will ensure that the names are sorted in increasing column
    # order. Note that sorted(usecols) is not strictly necessary, since
    # apparently that is done in read_table, but it keeps the names and columns
    # in the same order
    names = [x for (y, x) in sorted(pyzip(usecols, names))]
    usecols = sorted(usecols)
    points.set_names(names)
    points.set_usecols(usecols)
    points.set_dtypes(dtypes)
    # set binary attributes
    if options['input_file_format'].lower() == 'binary':
        points.set_bin_dtypes(bin_dtypes)
        points.set_bin_mults(bin_mults)
    points.set_fileformat(options['input_file_format'])
    print('done')
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    if memory_mode == 'big_memory':
        # ------------------------------------------------------------ #
        # run in big memory mode
        for i, segment in enumerate(segments):
            segments[i].allocate()

        while points:
            point = points.popleft()
            point.open()
            point.read()
            point.close()

            for segment in segments:
                segment.nc_add_data_to_array(point)

        for segment in segments:
            segment.nc_write_data_from_array()
            segment.nc_close()
        # ------------------------------------------------------------ #

    elif memory_mode == 'standard':
        # ------------------------------------------------------------ #
        # Open VIC files and put data into netcdfs

        chunk = Plist()
        while points:
            point = points.popleft()
            point.open()
            point.read()
            point.close()
            chunk.append(point)
            if len(chunk) > int(options['chunksize']) or len(points) == 0:
                for segment in segments:
                    segment.nc_add_data_standard(chunk)
                chunk = Plist()
            del point
        # ------------------------------------------------------------ #

        # ------------------------------------------------------------ #
        # Close the netcdf files
        for segment in segments:
            segment.nc_close()
        # ------------------------------------------------------------ #
    elif memory_mode == 'original':
        # ------------------------------------------------------------ #
        # Run in original memory mode (a.k.a. vic2nc.c mode)
        # Open all files
        for point in points:
            point.open()

        while segments:
            segment = segments.popleft()
            segment.allocate()
            count = segment.count

            for point in points:
                point.read(count)
                segment.nc_add_data_to_array(point)

            segment.nc_write_data_from_array()
            segment.nc_close()

        for point in points:
            point.close()
        # ------------------------------------------------------------ #

    return
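The long comment before `points.set_names(...)` deserves a standalone illustration: `pandas.read_table` applies `usecols` in ascending order no matter how they are listed, so the names must be re-sorted by column number to stay aligned. Using the example from the comment itself:

usecols = [3, 4, 5, 6, 10, 7, 8, 9]
names = ['prcp', 'evap', 'runoff', 'baseflow', 'swe', 'sm1', 'sm2', 'sm3']

names = [n for (c, n) in sorted(zip(usecols, names))]
usecols = sorted(usecols)

print(names)    # ['prcp', 'evap', 'runoff', 'baseflow', 'sm1', 'sm2', 'sm3', 'swe']
print(usecols)  # [3, 4, 5, 6, 7, 8, 9, 10]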
Example #6
def nc_to_vic(config_file):
    ''' This function converts netCDF files to VIC ascii or binary format
        files. (This function is adapted from tonic.)

        Parameters
        ----------
        config_file: <str>
            Path of config file for nc_to_vic

        Returns
        -------
        None

        Requires
        --------
        write_binary, write_ascii
    '''
    
    import numpy as np
    import struct
    import os
    from tonic.io import read_netcdf, read_config
    from tonic.pycompat import pyzip
    
    config = read_config(config_file)
    files = config['options']['files']  # should contain "{}", which will be replaced by YYYY
    var_keys = config['options']['var_keys']
    output_format = config['options']['output_format']  # Binary or ASCII
    if output_format == 'Binary':
        # multiplier and dtype used by write_binary below
        binary_mult = config['options']['binary_mult']
        binary_type = config['options']['binary_type']
    out_prefix = config['options']['out_prefix']
    verbose = config['options']['verbose']
    coord_keys = config['options']['coord_keys']  # varname of lon and lat in netCDF files
    lon_name = coord_keys[0]
    lat_name = coord_keys[1]
    start_year = config['options']['start_year']
    end_year = config['options']['end_year']
    latlon_precision = config['options']['latlon_precision']
    
    paths = config['paths']
    mask_varname = paths['mask_varname']

    mask = read_netcdf(paths['mask_path'],
                       variables=[mask_varname])[0][mask_varname]
    yi, xi = np.nonzero(mask)
    print('found {0} points in mask file.'.format(len(yi)))

    xlist = []
    ylist = []
    pointlist = []
    append = False

    for i, year in enumerate(range(start_year, end_year+1)):
        print('Year {}'.format(year))
        fname = files.format(year)
        d = read_netcdf(os.path.join(paths['in_path'], fname),
                        verbose=verbose)[0]

        if i == 0:

            # find point locations
            xs = d[lon_name]
            ys = d[lat_name]
            posinds = np.nonzero(xs > 180)
            xs[posinds] -= 360
            print('adjusted xs lon minimum')

            for y, x in pyzip(yi, xi):
                active_flag = False
                for key in var_keys:
                    if (d[key][:, y, x].all() is np.ma.masked) \
                            or (mask[y, x] == 0):
                        active_flag = True
                if not active_flag:
                    point = (ys[y], xs[x])
                    xlist.append(x)
                    ylist.append(y)
                    pointlist.append(point)

        else:
            append = True

        for y, x, point in pyzip(ylist, xlist, pointlist):

            data = np.empty((d[var_keys[0]].shape[0], len(var_keys)))

            for j, key in enumerate(var_keys):
                data[:, j] = d[key][:, y, x]

            if output_format == 'Binary':
                write_binary(data * binary_mult, point, binary_type,
                             out_prefix, paths['BinaryoutPath'], append)
            if output_format == 'ASCII':
                write_ascii(data, point, out_prefix, paths['ASCIIoutPath'],
                            latlon_precision, append)
    return
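The first step in `nc_to_vic()` reduces a 2-D land mask to the (y, x) index pairs of active cells via `np.nonzero`. A self-contained sketch of that step with a made-up mask:

import numpy as np

# A made-up 2 x 3 land mask: 1 marks an active cell, 0 a masked one.
mask = np.array([[0, 1, 0],
                 [1, 1, 0]])

yi, xi = np.nonzero(mask)  # row/column indices of the active cells
print([(int(y), int(x)) for y, x in zip(yi, xi)])  # [(0, 1), (1, 0), (1, 1)]
print('found {0} points in mask file.'.format(len(yi)))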
Example #7
def grid_params(soil_dict, target_grid, snow_dict, veg_dict, veglib_dict,
                version='4.1.2'):
    """
    Reads the coordinate information from the soil_dict and target_grid and
    maps all input dictionaries to the target grid.  Returns a grid_dict with
    the mapped input dictionary data.
    """
    print('gridding params now...')

    yi, xi = latlon2yx(soil_dict['lats'], soil_dict['lons'],
                       target_grid[YVAR], target_grid[XVAR])

    in_dicts = {'soil_dict': soil_dict}
    out_dicts = OrderedDict()
    if snow_dict:
        in_dicts['snow_dict'] = snow_dict
    else:
        out_dicts['snow_dict'] = False
    if veg_dict:
        in_dicts['veg_dict'] = veg_dict
    else:
        out_dicts['veg_dict'] = False

    # get "unmasked" mask
    mask = target_grid['mask']

    ysize, xsize = target_grid['mask'].shape

    ymask, xmask = np.nonzero(mask != 1)

    print('{0} masked values'.format(len(ymask)))

    for name, mydict in in_dicts.items():
        out_dict = OrderedDict()

        for var in mydict:
            if np.issubdtype(mydict[var].dtype, np.integer):
                fill_val = FILLVALUE_I
                dtype = np.int64
            else:
                fill_val = FILLVALUE_F
                dtype = np.float64

            if mydict[var].ndim == 1:
                out_dict[var] = np.ma.zeros((ysize, xsize),
                                            dtype=dtype)
                out_dict[var][yi, xi] = mydict[var]
                out_dict[var][ymask, xmask] = fill_val

            elif mydict[var].ndim == 2:
                steps = mydict[var].shape[1]
                out_dict[var] = np.ma.zeros((steps, ysize, xsize),
                                            dtype=dtype)
                for i in pyrange(steps):
                    out_dict[var][i, yi, xi] = mydict[var][:, i]
                out_dict[var][:, ymask, xmask] = fill_val

            elif mydict[var].ndim == 3:
                j = mydict[var].shape[1]
                k = mydict[var].shape[2]
                out_dict[var] = np.ma.zeros((j, k, ysize, xsize),
                                            dtype=dtype)
                for jj in pyrange(j):
                    for kk in pyrange(k):
                        out_dict[var][jj, kk, yi, xi] = mydict[var][:, jj, kk]
                for y, x in pyzip(ymask, xmask):
                    out_dict[var][:, :, y, x] = fill_val

            out_dict[var] = np.ma.masked_values(out_dict[var], fill_val)

        out_dicts[name] = out_dict

    if veglib_dict and version == '5.0.dev':
        # adjust vars for the following conditions
        # bare soil tile

        # Add bare soil tile
        var = 'Cv'
        bare = 1 - out_dicts['veg_dict'][var].sum(axis=0)
        bare[bare < 0.0] = 0.0
        nveg_classes = out_dicts['veg_dict'][var].shape[0] + 1
        shape = (nveg_classes, ) + out_dicts['veg_dict'][var].shape[1:]
        new = np.zeros(shape)
        new[:-1, :, :] = out_dicts['veg_dict'][var]
        new[-1, :, :] = bare
        new /= new.sum(axis=0)
        new[:, ymask, xmask] = FILLVALUE_F
        out_dicts['veg_dict'][var] = new

        # add dummy values for other veg vars
        #   double root_depth(veg_class, root_zone, nj, ni) ;
        #   double root_fract(veg_class, root_zone, nj, ni) ;
        #   double LAI(veg_class, month, nj, ni) ;
        for var in ['root_depth', 'root_fract', 'LAI']:
            shape = (nveg_classes, ) + out_dicts['veg_dict'][var].shape[1:]
            new = np.zeros(shape) + FILLVALUE_F
            new[:-1, :, :] = out_dicts['veg_dict'][var]
            new[-1, :, :] = 0
            out_dicts['veg_dict'][var] = np.ma.masked_values(new, FILLVALUE_F)

        # Distribute the veglib variables
        # 1st - the 1d vars
        #   double lib_overstory(veg_class) ;  --> (veg_class, nj, ni)
        for var in ['overstory', 'rarc', 'rmin', 'wind_h', 'RGL', 'rad_atten',
                    'wind_atten', 'trunk_ratio', 'snow_albedo']:
            lib_var = 'lib_{0}'.format(var)
            new = np.zeros((nveg_classes, ysize, xsize)) + FILLVALUE_F
            new[:-1, yi, xi] = veglib_dict[lib_var][:, np.newaxis]
            new[-1, yi, xi] = 0
            new[:, ymask, xmask] = FILLVALUE_F
            out_dicts['veg_dict'][var] = np.ma.masked_values(new, FILLVALUE_F)

        # 2nd - the 2d vars
        for var in ['albedo', 'veg_rough', 'displacement']:
            lib_var = 'lib_{0}'.format(var)
            shape = (nveg_classes, veglib_dict[lib_var].shape[1], ysize, xsize)
            new = np.zeros(shape) + FILLVALUE_F
            new[:-1, :, yi, xi] = veglib_dict[lib_var][:, :, np.newaxis]
            new[-1, :, yi, xi] = 0
            for y, x in pyzip(ymask, xmask):
                new[:, :, y, x] = FILLVALUE_F
            out_dicts['veg_dict'][var] = np.ma.masked_values(new, FILLVALUE_F)

        # 3rd - remove the redundant vars
        #   double lib_LAI(veg_class, month) ;
        # removed from file

    return out_dicts
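Every branch of `grid_params()` reduces to the same scatter-by-index move: a 1-D per-cell parameter vector is placed onto the 2-D target grid with fancy indexing, cells outside the mask get the fill value, and that fill value is then masked. A minimal sketch with made-up data:

import numpy as np

FILL = -9999.0

# Made-up 2 x 2 target grid: three active cells and one masked cell.
mask = np.array([[1, 1],
                 [0, 1]])
ysize, xsize = mask.shape
yi, xi = np.nonzero(mask == 1)        # active cells, in parameter-file order
values = np.array([0.1, 0.2, 0.3])    # one soil parameter value per cell

out = np.ma.zeros((ysize, xsize))
out[yi, xi] = values                  # scatter the 1-D vector onto the grid
out[np.nonzero(mask != 1)] = FILL     # fill the cells outside the mask
out = np.ma.masked_values(out, FILL)  # mask them, as grid_params does
print(out)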
Example #8
import os

import numpy as np

from tonic.io import read_config, read_netcdf
from tonic.pycompat import pyzip
# write_ascii is assumed to be defined in the same script


def _run(args):

    config = read_config(args.config)
    files = [config['Basics']['files']]
    var_keys = [config['Basics']['var_keys']]
    output = config['Paths']['ASCIIoutPath']
    #binary_mult = config['Basics']['binary_mult']
    #binary_type = config['Basics']['binary_type'],
    paths = config['Paths']
    out_prefix = config['Basics']['out_prefix']
    verbose = config['Basics']['verbose']
    mask = read_netcdf(paths['mask_path'], variables=['mask'])[0]['mask']
    yi, xi = np.nonzero(mask)
    print(mask)
    print('found {0} points in mask file.'.format(len(xi)))
    #x = read_netcdf(os.path.join(paths['in_path'], 'pr_1979.nc'))
    #print(x)

    xlist = []
    ylist = []
    pointlist = []
    append = False

    for i, fname in enumerate(files):
        d = read_netcdf(os.path.join(paths['in_path'], fname),
                        verbose=verbose)[0]
        print(i)
        if i == 0:

            # find point locations
            xs = d['lon']
            ys = d['lat']
            posinds = np.nonzero(xs > 180)
            xs[posinds] -= 360
            print('adjusted xs lon minimum')

            for y, x in pyzip(yi, xi):
                active_flag = False
                for key in var_keys:
                    if (d[key][:, y, x].all() is np.ma.masked) \
                            or (mask[y, x] == 0):
                        active_flag = True
                if not active_flag:
                    point = (ys[y], xs[x])
                    print(point)
                    xlist.append(x)
                    ylist.append(y)
                    pointlist.append(point)

        else:
            append = True

        for y, x, point in pyzip(ylist, xlist, pointlist):

            data = np.empty((d[var_keys[0]].shape[0], len(var_keys)))

            for j, key in enumerate(var_keys):
                data[:, j] = d[key][:, y, x]

            #if output['Binary']:
            #   write_binary(data * binary_mult, point, binary_type,
            #               out_prefix, paths['BinaryoutPath'], append)
            #if output['ASCII']:
            write_ascii(data, point, out_prefix, paths['ASCIIoutPath'],
                        append)
    return