def order_vars(self):
    """
    Return the order variables object.

    This instance is generated when data is ordered from the NSIDC.

    See Also
    --------
    variables.Variables

    Examples
    --------
    >>> reg_a = ipx.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28']) # doctest: +SKIP
    >>> reg_a.earthdata_login(user_id,user_email) # doctest: +SKIP
    Earthdata Login password:  ········
    >>> reg_a.order_vars # doctest: +SKIP
    <icepyx.core.variables.Variables at [location]>
    """
    if not hasattr(self, "_order_vars"):
        if self._source == "order":
            # DevGoal: check for active session here
            kwargs = {"session": self._session, "product": self.product}
            if hasattr(self, "_cust_options"):
                # the available-variables list is already known from the
                # customization options fetched earlier
                kwargs["avail"] = self._cust_options["variables"]
            else:
                kwargs["version"] = self._version
            self._order_vars = Variables(self._source, **kwargs)

    # NOTE(review): order_vars.avail is only populated when _cust_options
    # exists at the time this object is first created, not if _cust_options
    # is filled in before a later access -- a property setter is probably
    # the right mechanism to fix that.
    return self._order_vars
def vars(self):
    """
    Return the variables object associated with the data being read in.

    The instance is generated from the source file, or from the first
    file in a list of input files (when the source is a directory).

    See Also
    --------
    variables.Variables

    Examples
    --------
    >>> reader = ipx.Read(path_root, "ATL06", pattern) # doctest: +SKIP
    >>> reader.vars # doctest: +SKIP
    <icepyx.core.variables.Variables at [location]>
    """
    try:
        # return the cached instance if one was already built
        return self._read_vars
    except AttributeError:
        self._read_vars = Variables(
            "file", path=self._filelist[0], product=self._prod
        )
        return self._read_vars
def file_vars(self):
    """
    Return the file variables object.

    This instance is generated when files are used to create the data
    object (not yet implemented).

    See Also
    --------
    variables.Variables

    Examples
    --------
    >>> reg_a = ipx.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28']) # doctest: +SKIP
    >>> reg_a.earthdata_login(user_id,user_email) # doctest: +SKIP
    Earthdata Login password:  ········
    >>> reg_a.file_vars # doctest: +SKIP
    <icepyx.core.variables.Variables at [location]>
    """
    # build lazily on first access; only a 'file' source produces one
    if not hasattr(self, "_file_vars") and self._source == "file":
        self._file_vars = Variables(self._source, product=self.product)
    return self._file_vars
def file_vars(self):
    """
    Return the file variables object.

    This instance is generated when files are used to create the data object (not yet implemented).

    See Also
    --------
    variables.Variables

    Examples
    --------
    >>> reg_a = icepyx.icesat2data.Icesat2Data('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28']) # doctest: +SKIP
    >>> reg_a.earthdata_login(user_id,user_email) # doctest: +SKIP
    Earthdata Login password:  ········
    >>> reg_a.file_vars # doctest: +SKIP
    <icepyx.core.variables.Variables at [location]>
    """
    # Lazily build the Variables instance on first access.
    if not hasattr(self, '_file_vars'):
        if self._source == 'file':
            # NOTE(review): passes the older `dataset=` keyword, where the
            # sibling implementation passes `product=` -- presumably a legacy
            # API; confirm Variables still accepts `dataset` before relying
            # on this path.
            self._file_vars = Variables(self._source, dataset=self.dataset)
    # NOTE(review): if the source is not 'file' and no cached value exists,
    # this return raises AttributeError -- confirm that is intended.
    return self._file_vars
def show_custom_options(self, dictview=False):
    """
    Display customization/subsetting options available for this product.

    Parameters
    ----------
    dictview : boolean, default False
        Show the variable portion of the custom options list as a dictionary with key:value
        pairs representing variable:paths-to-variable rather than as a long list of full
        variable paths.

    Examples
    --------
    >>> reg_a = ipx.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28']) # doctest: +SKIP
    >>> reg_a.earthdata_login(user_id,user_email) # doctest: +SKIP
    Earthdata Login password:  ········
    >>> reg_a.show_custom_options(dictview=True) # doctest: +SKIP
    Subsetting options
    [{'id': 'ICESAT2',
    'maxGransAsyncRequest': '2000',
    'maxGransSyncRequest': '100',
    'spatialSubsetting': 'true',
    'spatialSubsettingShapefile': 'true',
    'temporalSubsetting': 'true',
    'type': 'both'}]
    Data File Formats (Reformatting Options)
    ['TABULAR_ASCII', 'NetCDF4-CF', 'Shapefile', 'NetCDF-3']
    Reprojection Options
    []
    Data File (Reformatting) Options Supporting Reprojection
    ['TABULAR_ASCII', 'NetCDF4-CF', 'Shapefile', 'NetCDF-3', 'No reformatting']
    Data File (Reformatting) Options NOT Supporting Reprojection
    []
    Data Variables (also Subsettable)
    ['ancillary_data/atlas_sdp_gps_epoch',
    'ancillary_data/control',
    'ancillary_data/data_end_utc',
    .
    .
    .
    'quality_assessment/gt3r/signal_selection_source_fraction_3']
    """
    headers = [
        "Subsetting options",
        "Data File Formats (Reformatting Options)",
        "Reprojection Options",
        "Data File (Reformatting) Options Supporting Reprojection",
        "Data File (Reformatting) Options NOT Supporting Reprojection",
        "Data Variables (also Subsettable)",
    ]
    keys = [
        "options",
        "fileformats",
        "reprojectionONLY",
        "formatreproj",
        "noproj",
        "variables",
    ]

    # Fetch (or re-fetch) the custom options when they are absent or
    # incomplete. The previous implementation discarded the result of
    # `all(...)` and used `except AttributeError or KeyError:` -- which
    # Python evaluates as `except AttributeError:` only -- so an
    # incomplete _cust_options dict never triggered a refresh.
    if not hasattr(self, "_cust_options") or not all(
        key in self._cust_options for key in keys
    ):
        self._cust_options = is2ref._get_custom_options(
            self._session, self.product, self._version
        )

    for h, k in zip(headers, keys):
        print(h)
        if k == "variables" and dictview:
            # collapse the long path list into a variable -> paths mapping
            vgrp, paths = Variables.parse_var_list(self._cust_options[k])
            pprint.pprint(vgrp)
        else:
            pprint.pprint(self._cust_options[k])
def _build_single_file_dataset(self, file, groups_list):
    """
    Create a single xarray dataset with all of the wanted variables/groups from the
    wanted var list for a single data file/url.

    Parameters
    ----------
    file : str
        Full path to ICESat-2 data file.
        Currently tested for locally downloaded files;
        untested but hopefully works for s3 stored cloud files.
    groups_list : list of strings
        List of full paths to data variables within the file.
        e.g. ['orbit_info/sc_orient', 'gt1l/land_ice_segments/h_li',
        'gt1l/land_ice_segments/latitude', 'gt1l/land_ice_segments/longitude']

    Returns
    -------
    Xarray Dataset

    Raises
    ------
    ValueError
        If the product recorded in the file does not match this object's
        product specification.
    """
    file_product = self._read_single_var(file, "/").attrs[
        "identifier_product_type"
    ]
    # Validate with an explicit raise rather than `assert`, which is
    # silently stripped when Python runs with -O.
    if file_product != self._prod:
        raise ValueError(
            "Your product specification does not match the product specification within your files."
        )

    # I think the below method might NOT read the file into memory as the above might?
    # import h5py
    # with h5py.File(filepath,'r') as h5pt:
    #     prod_id = h5pt.attrs["identifier_product_type"]

    # DEVNOTE: does not actually apply wanted variable list, and has not been tested
    # for merging multiple files into one ds

    # Gridded products can be opened directly as a single dataset.
    if self._prod in [
        "ATL14",
        "ATL15",
        "ATL16",
        "ATL17",
        "ATL18",
        "ATL19",
        "ATL20",
        "ATL21",
    ]:
        is2ds = xr.open_dataset(file)
    else:
        is2ds = self._build_dataset_template(file)

        # returns the wanted groups as a single list of full group path strings
        wanted_dict, wanted_groups = Variables.parse_var_list(
            groups_list, tiered=False
        )
        wanted_groups_set = set(wanted_groups)
        # orbit_info is used automatically as the first group path so the info
        # is available for the rest of the groups
        wanted_groups_set.remove("orbit_info")
        # returns the wanted groups as a list of lists with group path string
        # elements separated
        _, wanted_groups_tiered = Variables.parse_var_list(
            groups_list, tiered=True
        )

        for grp_path in ["orbit_info"] + list(wanted_groups_set):
            ds = self._read_single_var(file, grp_path)
            is2ds = Read._add_var_to_ds(
                is2ds, ds, grp_path, wanted_groups_tiered, wanted_dict
            )

    return is2ds