def fetch_Landsat8_scene_list():
    """
    Downloads and extracts the most recent version of the scene_list
    text file for reference.

    http://landsat-pds.s3.amazonaws.com/scene_list.gz

    :return scene_list_text_data: a text_data object with all the data on
        scene inventory on amazon WS.
    """

    print("Updating scene list")

    # define save path for new scene list
    # NOTE(review): getsitepackages()[1] assumes a fixed ordering of the
    # site-packages list, which varies by platform/venv -- confirm.
    directory = site.getsitepackages()[1]
    gz_path = "{0}/dnppy/landsat/metadata/scene_list.gz".format(directory)
    txt_path = "{0}/dnppy/landsat/metadata/scene_list.txt".format(directory)

    # download then extract the gz file to a txt file.
    download_url("http://landsat-pds.s3.amazonaws.com/scene_list.gz", gz_path)
    with gzip.open(gz_path, 'rb') as gz:
        content = gz.read()

    # BUGFIX: was f.writelines(content). Iterating a bytes object on
    # Python 3 yields ints, so writelines() raises TypeError; write()
    # emits the whole payload correctly.
    with open(txt_path, 'wb+') as f:
        f.write(content)

    # build a new text data object from the fresh scene list
    scene_list_text_data = textio.text_data()
    scene_list_text_data.read_csv(txt_path, delim=",", has_headers=True)

    return scene_list_text_data
def fetch_Landsat8_scene_list():
    """
    Downloads and extracts the most recent version of the scene_list
    text file for reference.

    http://landsat-pds.s3.amazonaws.com/scene_list.gz

    :return scene_list_text_data: a text_data object with all the data on
        scene inventory on amazon WS.
    """

    print("Updating scene list")

    # define save paths for the fresh scene list.
    # NOTE(review): indexing getsitepackages()[1] is fragile across
    # platforms and virtualenvs -- confirm the intended install layout.
    directory = site.getsitepackages()[1]
    gz_path = "{0}/dnppy/landsat/metadata/scene_list.gz".format(directory)
    txt_path = "{0}/dnppy/landsat/metadata/scene_list.txt".format(directory)

    # download, then decompress the gz archive into a plain text file.
    download_url("http://landsat-pds.s3.amazonaws.com/scene_list.gz", gz_path)
    with gzip.open(gz_path, 'rb') as gz:
        content = gz.read()

    # BUGFIX: f.writelines(content) on a bytes object fails under
    # Python 3 (bytes iterate as ints); write() is the correct call.
    with open(txt_path, 'wb+') as f:
        f.write(content)

    # wrap the extracted csv in a text_data object for callers.
    scene_list_text_data = textio.text_data()
    scene_list_text_data.read_csv(txt_path, delim=",", has_headers=True)

    return scene_list_text_data
def to_csv(self, csv_path):
    """
    Writes the row data of this time_series to a csv file.

    :param csv_path: output filepath. If it resolves to the same file this
        time_series was read from, "_out" is appended to the filename so
        the input data is never overwritten.
    """

    # disallow overwriting the csv used as input. Added by request
    if os.path.abspath(self.infilepath) == os.path.abspath(csv_path):
        # BUGFIX: the old csv_path.replace(".csv", "_out.csv") rewrote the
        # first ".csv" occurring anywhere in the path (including directory
        # names) and did nothing at all for non-".csv" extensions, leaving
        # the input file exposed to overwrite. splitext targets only the
        # real extension and always renames.
        root, ext = os.path.splitext(csv_path)
        csv_path = "{0}_out{1}".format(root, ext)

    print("Saved time series '{0}' with {1} rows and {2} columns".format(
        self.name, len(self.row_data), len(self.col_data)))

    tdo = textio.text_data(text_filepath=csv_path,
                           headers=self.headers,
                           row_data=self.row_data)
    tdo.write_csv()
    return
def datatype_library():
    """
    Builds and returns the datatype_library dict from the packaged
    datatype_library.csv file. To add entries, edit the csv file rather
    than this code.

    Standard name format is
    ``<product short name>_<resolution identifier>_<coverage area>``,
    e.g. "TRMM_1.0_GLOBAL" or "GPM_IMERG_0.1_GLOBAL".

    Geotransform math used to create the array [A, B, C, D, E, F]

    .. code-block:: python

        x = A + iB + jC
        y = D + iE + jF

    where x,y are real spatial coordinates, i,j are matrix indices, and
    A-F are the geotransformation coefficients.

    :return datatype_library_dict: A dictionary
    """

    library = {}

    # locate this installation and load the csv library file.
    here = os.path.dirname(__file__)
    csv_path = os.path.join(here, "lib", "datatype_library.csv")
    reader = textio.text_data()
    reader.read_csv(csv_path)

    for entry in reader.row_data:

        # decode the text columns: name, projection spec, coefficients, source
        dtype_name = str(entry[0].replace(" ", ""))
        proj_parts = entry[1].replace(" ", "").split("-")
        proj_id = str(proj_parts[-1])
        proj_source = str("-".join(proj_parts[:-1]))
        coeffs = tuple(float(value) for value in entry[2:8])
        source = str(entry[8])

        # EPSG codes resolve through the osr library; everything else
        # reads wkt text from a bundled SR-ORG .prj file.
        if proj_source == "EPSG":
            ref = osr.SpatialReference()
            ref.ImportFromEPSG(int(proj_id))
            wkt = ref.ExportToWkt()
        else:
            prj_path = os.path.join(here, "lib", "prj",
                                    "{0}.prj".format(proj_id))
            with open(prj_path, "r") as prj_file:
                wkt = prj_file.read()

        # register the finished datatype instance under its name.
        library[dtype_name] = datatype(name=dtype_name,
                                       projectionID="-".join([proj_source, proj_id]),
                                       geotransform=coeffs,
                                       projectionTXT=wkt,
                                       downloadSource=source)

    return library
def datatype_library():
    """
    Assembles the datatype_library dict from datatype_library.csv and
    returns it. New datatypes should be added by editing that csv file,
    not this function.

    Names follow the convention
    ``<product short name>_<resolution identifier>_<coverage area>``,
    such as "TRMM_1.0_GLOBAL" or "GPM_IMERG_0.1_GLOBAL".

    Geotransform math used to create the array [A, B, C, D, E, F]

    .. code-block:: python

        x = A + iB + jC
        y = D + iE + jF

    Here x,y are real spatial coordinates and i,j are matrix indices;
    A through F are the geotransformation coefficients.

    :return datatype_library_dict: A dictionary
    """

    datatype_dict = {}

    # this installation's directory anchors all bundled data paths.
    install_dir = os.path.dirname(__file__)

    # pull the raw rows out of the csv library file.
    tdat = textio.text_data()
    tdat.read_csv(os.path.join(install_dir, "lib", "datatype_library.csv"))

    for row in tdat.row_data:

        # column 0: datatype name; column 1: "<lib>-<code>" projection spec
        name = str(row[0].replace(" ", ""))
        spec = row[1].replace(" ", "")
        proj = str(spec.split("-")[-1])
        proj_lib = str("-".join(spec.split("-")[:-1]))

        # columns 2-7: geotransform coefficients A..F; column 8: source
        geotrans = tuple(map(float, row[2:8]))
        dls = str(row[8])

        # projection text comes from osr for EPSG codes, otherwise from a
        # shipped SR-ORG .prj file named after the code.
        if proj_lib == "EPSG":
            srs = osr.SpatialReference()
            srs.ImportFromEPSG(int(proj))
            proj_text = srs.ExportToWkt()
        else:
            proj_fname = os.path.join(install_dir, "lib", "prj",
                                      "{0}.prj".format(proj))
            with open(proj_fname, "r") as f:
                proj_text = f.read()

        datatype_dict[name] = datatype(name=name,
                                       projectionID="-".join([proj_lib, proj]),
                                       geotransform=geotrans,
                                       projectionTXT=proj_text,
                                       downloadSource=dls)

    return datatype_dict