def __getitem__(self, index):
    """Return the first argument's data with its units removed (set to 1).

    Constants are used whole; non-constant arguments are sliced by *index*.
    """
    arg = self.arguments[0]
    if is_constant(arg):
        data = arg
    else:
        data = arg[index]
    new_name = "rmunits({!s})".format(getname(data))
    return PhysArray(data, name=new_name, units=1)
def __getitem__(self, index):
    """Map 1D PFT-level GPP onto a (time, lat, lon) grid for one vegetation class.

    For every gridcell that has a vegetated landunit, computes the
    PFT-weighted average of GPP over the PFTs belonging to the requested
    vegetation class ('grass', 'shrub', or 'tree').  Gridcells with no
    valid land gridcell, no vegetated landunit, or no matching active
    PFTs are filled with 1e20 and masked in the returned array.

    NOTE(review): assumes the pfts1d_* / land1d_* / grid1d_* arrays are
    index-aligned CLM sparse-vector metadata — confirm against the input
    history files.
    """
    pGPP = self.arguments[0][index]
    # vegType = grass, shrub, or tree
    vegType = self.arguments[1]
    ptime = self.arguments[2][index]
    plat = self.arguments[3][index]
    plon = self.arguments[4][index]
    pgrid1d_ixy = self.arguments[5][index]
    pgrid1d_jxy = self.arguments[6][index]
    pgrid1d_lon = self.arguments[7][index]
    pgrid1d_lat = self.arguments[8][index]
    pland1d_lon = self.arguments[9][index]
    pland1d_lat = self.arguments[10][index]
    pland1d_ityplunit = self.arguments[11][index]
    ppfts1d_lon = self.arguments[12][index]
    ppfts1d_lat = self.arguments[13][index]
    ppfts1d_active = self.arguments[14][index]
    ppfts1d_itype_veg = self.arguments[15][index]
    ppfts1d_wtgcell = self.arguments[16][index]
    ppfts1d_wtlunit = self.arguments[17][index]

    # A None index is the "shape query" pass: return an empty array that
    # carries only the output dimension names.
    if index is None:
        return PhysArray(
            np.zeros((0, 0, 0)),
            dimensions=[
                ptime.dimensions[0],
                plat.dimensions[0],
                plon.dimensions[0],
            ],
        )

    # Unwrap the raw numpy data from the PhysArray wrappers
    GPP = pGPP.data
    time = ptime.data
    lat = plat.data
    lon = plon.data
    grid1d_ixy = pgrid1d_ixy.data
    grid1d_jxy = pgrid1d_jxy.data
    grid1d_lon = pgrid1d_lon.data
    grid1d_lat = pgrid1d_lat.data
    land1d_lon = pland1d_lon.data
    land1d_lat = pland1d_lat.data
    land1d_ityplunit = pland1d_ityplunit.data
    pfts1d_lon = ppfts1d_lon.data
    pfts1d_lat = ppfts1d_lat.data
    pfts1d_active = ppfts1d_active.data
    pfts1d_itype_veg = ppfts1d_itype_veg.data
    pfts1d_wtgcell = ppfts1d_wtgcell.data
    pfts1d_wtlunit = ppfts1d_wtlunit.data

    # Tolerance check for weights summing to 1
    eps = 1.0e-5
    # If 1, pft is active
    active_pft = 1
    # If 1, landunit is veg
    veg_lunit = 1
    # C3 arctic grass,
    # C3 non-arctic grass,
    # C4 grass
    beg_grass_pfts = 12
    end_grass_pfts = 14
    # broadleaf evergreen shrub - temperate,
    # broadleaf deciduous shrub - temperate,
    # broadleaf deciduous shrub - boreal
    beg_shrub_pfts = 9
    end_shrub_pfts = 11
    # needleleaf evergreen tree - temperate,
    # needleleaf evergreen tree - boreal,
    # needleleaf deciduous tree - boreal,
    # broadleaf evergreen tree - tropical,
    # broadleaf evergreen tree - temperate,
    # broadleaf deciduous tree - tropical,
    # broadleaf deciduous tree - temperate,
    # broadleaf deciduous tree - boreal
    beg_tree_pfts = 1
    end_tree_pfts = 8

    # Will contain weighted average for grass pfts on 2d grid
    varo_vegType = np.zeros([len(time), len(lat), len(lon)])

    # Row-wise lookup tables: (lon, lat, active) per PFT, (ixy, jxy) per
    # gridcell, and (lon, lat, landunit-type) per landunit.
    tu = np.stack((pfts1d_lon, pfts1d_lat, pfts1d_active), axis=1)
    ind = np.stack((grid1d_ixy, grid1d_jxy), axis=1)
    lu = np.stack((land1d_lon, land1d_lat, land1d_ityplunit), axis=1)

    # Loop over lat/lons
    for ixy in range(len(lon)):
        for jxy in range(len(lat)):
            grid_indx = -99  # 1d grid index
            # grid1d_ixy/jxy are 1-based Fortran-style indices
            ind_comp = (ixy + 1, jxy + 1)
            gi = np.where(np.all(ind == ind_comp, axis=1))[0]
            if len(gi) > 0:
                grid_indx = gi[0]

            # Check for valid land gridcell
            if grid_indx != -99:

                # Gridcell lat/lons
                grid1d_lon_pt = grid1d_lon[grid_indx]
                grid1d_lat_pt = grid1d_lat[grid_indx]

                # veg landunit index for this gridcell
                t_var = (grid1d_lon_pt, grid1d_lat_pt, veg_lunit)
                landunit_indx = np.where(np.all(t_var == lu, axis=1))[0]

                # Check for valid veg landunit
                if landunit_indx.size > 0:
                    # Select active PFTs of the requested class with
                    # nonzero gridcell weight.  The boolean masks are
                    # combined by elementwise multiplication (logical AND).
                    if "grass" in vegType:
                        t_var = (grid1d_lon_pt, grid1d_lat_pt, active_pft)
                        pft_indx = np.where(
                            np.all(t_var == tu, axis=1)
                            * (pfts1d_wtgcell > 0.0)
                            * (pfts1d_itype_veg >= beg_grass_pfts)
                            * (pfts1d_itype_veg <= end_grass_pfts))[0]
                    elif "shrub" in vegType:
                        t_var = (grid1d_lon_pt, grid1d_lat_pt, active_pft)
                        pft_indx = np.where(
                            np.all(t_var == tu, axis=1)
                            * (pfts1d_wtgcell > 0.0)
                            * (pfts1d_itype_veg >= beg_shrub_pfts)
                            * (pfts1d_itype_veg <= end_shrub_pfts))[0]
                    elif "tree" in vegType:
                        t_var = (grid1d_lon_pt, grid1d_lat_pt, active_pft)
                        pft_indx = np.where(
                            np.all(t_var == tu, axis=1)
                            * (pfts1d_wtgcell > 0.0)
                            * (pfts1d_itype_veg >= beg_tree_pfts)
                            * (pfts1d_itype_veg <= end_tree_pfts))[0]

                    # Check for valid pfts and compute weighted average
                    if pft_indx.size > 0:
                        for t in range(len(time)):

                            if "grass" in vegType:
                                pfts1d_wtlunit_grass = (
                                    pfts1d_wtlunit[pft_indx]).astype(
                                        np.float32)
                                dum = GPP[t, pft_indx]
                                # Normalize landunit weights over the
                                # selected PFTs
                                weights = pfts1d_wtlunit_grass / np.sum(
                                    pfts1d_wtlunit_grass)
                                # NOTE(review): this sanity check (and the
                                # hard exit) only runs for the grass branch
                                if np.absolute(1.0 - np.sum(weights)) > eps:
                                    print(
                                        "Weights do not sum to 1, exiting")
                                    sys.exit(-1)
                                varo_vegType[t, jxy, ixy] = np.sum(dum *
                                                                   weights)

                            elif "shrub" in vegType:
                                pfts1d_wtlunit_shrub = (
                                    pfts1d_wtlunit[pft_indx]).astype(
                                        np.float32)
                                dum = GPP[t, pft_indx]
                                weights = pfts1d_wtlunit_shrub / np.sum(
                                    pfts1d_wtlunit_shrub)
                                varo_vegType[t, jxy, ixy] = np.sum(dum *
                                                                   weights)

                            elif "tree" in vegType:
                                pfts1d_wtlunit_tree = (
                                    pfts1d_wtlunit[pft_indx]).astype(
                                        np.float32)
                                dum = GPP[t, pft_indx]
                                weights = pfts1d_wtlunit_tree / np.sum(
                                    pfts1d_wtlunit_tree)
                                varo_vegType[t, jxy, ixy] = np.sum(dum *
                                                                   weights)
                    else:
                        # No matching PFTs in this gridcell: fill value
                        varo_vegType[:, jxy, ixy] = 1e20
                else:
                    # No vegetated landunit: fill value
                    varo_vegType[:, jxy, ixy] = 1e20
            else:
                # Not a land gridcell: fill value
                varo_vegType[:, jxy, ixy] = 1e20

    new_name = (
        "CLM_pft_to_CMIP6_vegtype({}{}{}{}{}{}{}{}{}{}{}{}{}{}{}{}{}{})".
        format(
            pGPP.name,
            vegType,
            ptime.name,
            plat.name,
            plon.name,
            pgrid1d_ixy.name,
            pgrid1d_jxy.name,
            pgrid1d_lon.name,
            pgrid1d_lat.name,
            pland1d_lon.name,
            pland1d_lat.name,
            pland1d_ityplunit.name,
            ppfts1d_lon.name,
            ppfts1d_lat.name,
            ppfts1d_active.name,
            ppfts1d_itype_veg.name,
            ppfts1d_wtgcell.name,
            ppfts1d_wtlunit.name,
        ))

    # Clamp any residual huge values to the canonical fill and mask them
    varo_vegType[varo_vegType >= 1e16] = 1e20
    ma_varo_vegType = np.ma.masked_values(varo_vegType, 1e20)

    return PhysArray(ma_varo_vegType, name=new_name, units=pGPP.units)
def __getitem__(self, index):
    """Wrap the first argument in a PhysArray and apply its down() conversion.

    Constants are passed through whole; anything else is sliced by *index*.
    """
    source = self.arguments[0]
    if is_constant(source):
        selected = source
    else:
        selected = source[index]
    return PhysArray(selected).down()
def __init__(self, name='output', dsdict=None):
    """
    Initializer

    Parameters:
        name (str): String name to optionally give to a dataset
        dsdict (dict): Dictionary describing the dataset variables

    Raises:
        ValueError: If a variable lacks required dimensions, a file section
            lacks a filename, a file is claimed by two variables, a
            referenced bounds/coordinates variable is missing, or a
            variable's datatype cannot be written in its file's format.
    """
    # FIX: the default used to be a shared mutable OrderedDict() instance;
    # use None as the sentinel so each call gets its own empty dict.
    if dsdict is None:
        dsdict = OrderedDict()

    # Initialize a dictionary of file sections
    files = {}

    # Look over all variables in the dataset dictionary
    variables = OrderedDict()
    metavars = []
    for vname, vdict in dsdict.iteritems():
        vkwds = {}

        # Get the variable attributes, if they are defined
        if 'attributes' in vdict:
            vkwds['attributes'] = vdict['attributes']

        # Get the datatype of the variable, otherwise defaults to VariableDesc default
        vkwds['datatype'] = 'float'
        if 'datatype' in vdict:
            vkwds['datatype'] = vdict['datatype']

        # Get either the 'definition' (string definition or data) of the variables
        def_wrn = ''
        if 'definition' in vdict:
            vdef = vdict['definition']
            if isinstance(vdef, basestring):
                # A string definition has no intrinsic shape; an empty
                # string is treated as missing
                if len(vdef.strip()) > 0:
                    vshape = None
                else:
                    def_wrn = 'Empty definition for output variable {!r} in dataset {!r}.'.format(vname, name)
            else:
                # Data definition: take the shape from the data itself
                vshape = PhysArray(vdef).shape
            vkwds['definition'] = vdef
        else:
            def_wrn = 'No definition given for output variable {!r} in dataset {!r}.'.format(vname, name)

        # Skip (with a warning) any variable without a usable definition
        if len(def_wrn) > 0:
            warn('{} Skipping output variable {}.'.format(def_wrn, vname), DefinitionWarning)
            continue

        # Get the dimensions of the variable (REQUIRED)
        if 'dimensions' in vdict:
            vdims = vdict['dimensions']
            # For character variables, the last dimension is the string length
            sldim = vdims[-1] if vkwds['datatype'] == 'char' else None
            if vshape is None:
                vkwds['dimensions'] = tuple(DimensionDesc(d, stringlen=(sldim == d)) for d in vdims)
            else:
                vkwds['dimensions'] = tuple(DimensionDesc(d, size=s, stringlen=(sldim == d))
                                            for d, s in zip(vdims, vshape))
        else:
            err_msg = 'Dimensions are required for variable {!r} in dataset {!r}'.format(vname, name)
            raise ValueError(err_msg)

        variables[vname] = VariableDesc(vname, **vkwds)

        # Parse the file section (if present); variables without one are metadata
        if 'file' in vdict:
            fdict = vdict['file']

            if 'filename' not in fdict:
                err_msg = ('Filename is required in file section of variable {!r} in dataset '
                           '{!r}').format(vname, name)
                raise ValueError(err_msg)

            fname = fdict['filename']
            # Each file may be "owned" by exactly one variable
            if fname in files:
                err_msg = ('Variable {!r} in dataset {!r} claims to own file '
                           '{!r}, but this file is already owned by variable '
                           '{!r}').format(vname, name, fname, files[fname]['variables'][0])
                raise ValueError(err_msg)
            files[fname] = {}

            if 'format' in fdict:
                files[fname]['format'] = fdict['format']

            if 'deflate' in fdict:
                files[fname]['deflate'] = fdict['deflate']

            if 'autoparse_time_variable' in fdict:
                files[fname]['autoparse_time_variable'] = fdict['autoparse_time_variable']

            if 'attributes' in fdict:
                files[fname]['attributes'] = fdict['attributes']

            # The owning variable is listed first, then any explicit metavars
            files[fname]['variables'] = [vname]
            if 'metavars' in fdict:
                for mvname in fdict['metavars']:
                    if mvname not in files[fname]['variables']:
                        files[fname]['variables'].append(mvname)

        else:
            metavars.append(vname)

    # Loop through all found files and create the file descriptors
    filedescs = []
    for fname, fdict in files.iteritems():

        # Get the variable descriptors for each variable required to be in the file
        vlist = OrderedDict([(vname, variables[vname]) for vname in fdict['variables']])

        # Get the unique list of dimension names required by these variables
        fdims = set()
        for vname in vlist:
            vdesc = vlist[vname]
            for dname in vdesc.dimensions:
                fdims.add(dname)

        # Loop through all the variable names identified as metadata (i.e., no 'file')
        for mvname in metavars:
            if mvname not in fdict['variables']:
                vdesc = variables[mvname]

                # Include this variable in the file only if all of its dimensions are included
                # (Scalar variables are excluded and must be included as metadata explicitly)
                if len(vdesc.dimensions) > 0 and set(vdesc.dimensions.keys()).issubset(fdims):
                    vlist[mvname] = vdesc

        # Loop through the current list of variables and check for any "bounds" or "coordinates" attributes
        mvnames = set()
        for vname in vlist:
            vdesc = vlist[vname]
            if 'bounds' in vdesc.attributes:
                mvname = vdesc.attributes['bounds']
                if mvname not in variables:
                    raise ValueError(('Variable {} references a bounds variable {} that is not '
                                      'found').format(vdesc.name, mvname))
                mvnames.add(mvname)
            if 'coordinates' in vdesc.attributes:
                for mvname in vdesc.attributes['coordinates'].split():
                    if mvname not in variables:
                        raise ValueError(('Variable {} references a coordinates variable {} that is not '
                                          'found').format(vdesc.name, mvname))
                    mvnames.add(mvname)

        # Add the bounds and coordinates to the list of variables
        for mvname in mvnames:
            if mvname not in vlist:
                vlist[mvname] = variables[mvname]

        # Create the file descriptor
        fdict['variables'] = [vlist[vname] for vname in vlist]
        fdesc = FileDesc(fname, **fdict)

        # Validate the variable types for the file descriptor
        for vname in vlist:
            vdesc = vlist[vname]
            vdtype = vdesc.datatype
            fformat = fdesc.format
            # FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; catch Exception only
            try:
                OutputDatasetDesc._validate_netcdf_type_(vdtype, fformat)
            except Exception:
                raise ValueError(('File {!r} of format {!r} cannot write variable {!r} with '
                                  'datatype {!r}').format(fname, fformat, vdesc.name, vdtype))

        # Append the validated file descriptor to the list
        filedescs.append(fdesc)

    # Call the base class to run self-consistency checks
    super(OutputDatasetDesc, self).__init__(name, files=filedescs)