def makeSHP(dic):
    """Write the geocoded points in *dic* to a point shapefile.

    Parameters
    ----------
    dic : dict
        Maps a label (string) to a (longitude, latitude) coordinate pair.

    Returns
    -------
    str
        The path of the shapefile that was written.

    Side effects: prompts for the output path via saveSHP(), reads the
    attribute-field name from the headerEntry widget and the SRID from the
    sridEntry widget, and writes a .prj file when generatePRJ() returns one.
    """
    shpname = saveSHP()
    shpWriter = Writer()
    shpWriter.autoBalance = 1
    # one text attribute named by the user, plus the coordinate columns
    shpWriter.field(headerEntry.get(), 'C', '255')
    shpWriter.field('Longitude', 'F')
    shpWriter.field('Latitude', 'F')
    shpWriter.shapeType = 1  # 1 = POINT

    # Write each geometry with its matching attribute record directly;
    # the original built three intermediate lists and abused list
    # comprehensions for their side effects.
    for key, coords in dic.items():
        shpWriter.point(*coords)
        shpWriter.record(key, coords[0], coords[1])

    shpWriter.save(shpname)

    # write the projection file alongside the shapefile, if one is available
    prj = generatePRJ(int(sridEntry.get()))
    if prj is not None:
        prjfile = shpname.replace('.shp', '') + '.prj'
        with open(prjfile, 'w') as prjfileOpen:
            prjfileOpen.write(prj)
    return shpname
def extract_HUC8(
    self,
    HUC8,
    output,
    gagefile='gagestations',
    verbose=True,
):
    """
    Extracts the USGS gage stations for a watershed from the gage
    station shapefile into a shapefile for the 8-digit hydrologic unit
    code of interest.

    Parameters
    ----------
    HUC8 : str
        The 8-digit hydrologic unit code to extract.
    output : str
        Destination directory (created if it does not exist).
    gagefile : str
        Base name (no extension) of the output shapefile.
    verbose : bool
        Print progress messages when True.
    """

    # make sure the metadata exist locally
    self.download_metadata()

    # make sure the output destination exists
    if not os.path.isdir(output):
        os.mkdir(output)

    sfile = '{}/{}'.format(output, gagefile)
    if not os.path.isfile(sfile + '.shp'):

        # copy the projection
        shutil.copy(self.NWIS + '.prj', sfile + '.prj')

        # read the file
        gagereader = Reader(self.NWIS, shapeType=1)
        gagerecords = gagereader.records()

        # pull out the HUC8 record to parse the dataset
        HUC8_index = gagereader.fields.index(['HUC', 'C', 8, 0]) - 1

        # iterate through the field and find gages in the watershed
        its = HUC8, sfile
        print('extracting gage stations in {} to {}\n'.format(*its))

        # enumerate replaces the manual i = 0 / i += 1 counter
        gage_indices = [i for i, record in enumerate(gagerecords)
                        if record[HUC8_index] == HUC8]

        # write the data from the HUC8 to a new shapefile
        w = Writer(shapeType=1)

        for field in gagereader.fields:
            w.field(*field)

        for i in gage_indices:
            point = gagereader.shape(i).points[0]
            w.point(*point)
            w.record(*gagerecords[i])

        w.save(sfile)

        if verbose:
            print('successfully extracted NWIS gage stations\n')

    elif verbose:
        print('gage station file {} exists\n'.format(sfile))

    self.set_metadata(sfile)
def extract_HUC8(self, HUC8, output, gagefile = 'gagestations', verbose = True):
    """Extracts the USGS gage stations for a watershed from the gage
    station shapefile into a shapefile for the 8-digit hydrologic unit
    code of interest.

    Parameters
    ----------
    HUC8 : str
        The 8-digit hydrologic unit code to extract.
    output : str
        Destination directory (created if it does not exist).
    gagefile : str
        Base name (no extension) of the output shapefile.
    verbose : bool
        Print progress messages when True.
    """

    # make sure the metadata exist locally
    self.download_metadata()

    # make sure the output destination exists
    if not os.path.isdir(output):
        os.mkdir(output)

    sfile = '{}/{}'.format(output, gagefile)
    if not os.path.isfile(sfile + '.shp'):

        # copy the projection
        shutil.copy(self.NWIS + '.prj', sfile + '.prj')

        # read the file
        gagereader = Reader(self.NWIS, shapeType = 1)
        gagerecords = gagereader.records()

        # pull out the HUC8 record to parse the dataset
        HUC8_index = gagereader.fields.index(['HUC', 'C', 8, 0]) - 1

        # iterate through the field and find gages in the watershed
        its = HUC8, sfile
        print('extracting gage stations in {} to {}\n'.format(*its))

        # enumerate replaces the manual i = 0 / i += 1 counter
        gage_indices = [i for i, record in enumerate(gagerecords)
                        if record[HUC8_index] == HUC8]

        # write the data from the HUC8 to a new shapefile
        w = Writer(shapeType = 1)

        for field in gagereader.fields:
            w.field(*field)

        for i in gage_indices:
            point = gagereader.shape(i).points[0]
            w.point(*point)
            w.record(*gagerecords[i])

        w.save(sfile)

        if verbose:
            print('successfully extracted NWIS gage stations\n')

    elif verbose:
        print('gage station file {} exists\n'.format(sfile))

    self.set_metadata(sfile)
def extract_bbox(self, bbox, output, verbose=True):
    """Extracts the NID dam locations for a watershed from the dam
    shapefile and the 8-digit hydrologic unit code of interest.

    Parameters
    ----------
    bbox : tuple
        (xmin, ymin, xmax, ymax) bounding box in the source's coordinates.
    output : str
        Base path (no extension) of the output shapefile.
    verbose : bool
        Print progress messages when True.
    """

    self.download_compressed()

    xmin, ymin, xmax, ymax = bbox

    # copy the projection files
    if verbose:
        print('copying the projections from the NID source\n')

    projection = self.source + '.prj'
    shutil.copy(projection, output + '.prj')

    # get the dams within the watershed
    if verbose:
        print('reading the dam file\n')

    sf = Reader(self.source, shapeType=1)

    # work around for issues with pyshp: some records fail to parse, so
    # substitute a sentinel record rather than aborting the whole extract
    damrecords = []
    for i in range(len(sf.shapes())):
        try:
            damrecords.append(sf.record(i))
        except Exception:
            damrecords.append([-100 for _ in range(len(sf.fields))])

    # only the coordinate fields are needed to test box membership; the
    # original looked up eleven additional field indices it never used
    long_index = sf.fields.index(['LONGITUDE', 'N', 19, 11]) - 1
    lat_index = sf.fields.index(['LATITUDE', 'N', 19, 11]) - 1

    # iterate through the fields and determine which points are in the box
    if verbose:
        print('extracting dams into new file\n')

    # enumerate replaces the manual i = 0 / i += 1 counter
    dam_indices = [i for i, record in enumerate(damrecords)
                   if self.inside_box([xmin, ymin], [xmax, ymax],
                                      [record[long_index],
                                       record[lat_index]])]

    # write the data from the bbox to a new shapefile
    w = Writer(output, shapeType=1)

    for field in sf.fields:
        w.field(*field)

    for i in dam_indices:
        point = sf.shape(i).points[0]
        w.point(*point)

        # decode any raw byte values so pyshp writes clean text
        rs = [value.decode('utf-8') if isinstance(value, bytes) else value
              for value in damrecords[i]]
        w.record(*rs)

    w.close()

    if verbose:
        print('successfully extracted NID dam locations to new file\n')
def extract_bbox(self, bbox, output, verbose = True):
    """Extracts the NID dam locations for a watershed from the dam
    shapefile and the 8-digit hydrologic unit code of interest.

    Parameters
    ----------
    bbox : tuple
        (xmin, ymin, xmax, ymax) bounding box in the source's coordinates.
    output : str
        Base path (no extension) of the output shapefile.
    verbose : bool
        Print progress messages when True.
    """

    self.download_compressed()

    xmin, ymin, xmax, ymax = bbox

    # copy the projection files
    if verbose:
        print('copying the projections from the NID source\n')

    projection = self.source + '.prj'
    shutil.copy(projection, output + '.prj')

    # get the dams within the watershed
    if verbose:
        print('reading the dam file\n')

    sf = Reader(self.source, shapeType = 1)

    # work around for issues with pyshp: some records fail to parse, so
    # substitute a sentinel record rather than aborting the whole extract
    damrecords = []
    for i in range(len(sf.shapes())):
        try:
            damrecords.append(sf.record(i))
        except Exception:
            damrecords.append([-100 for _ in range(len(sf.fields))])

    # only the coordinate fields are needed to test box membership; the
    # original looked up eleven additional field indices it never used
    long_index = sf.fields.index(['LONGITUDE', 'N', 19, 11]) - 1
    lat_index = sf.fields.index(['LATITUDE', 'N', 19, 11]) - 1

    # iterate through the fields and determine which points are in the box
    if verbose:
        print('extracting dams into new file\n')

    # enumerate replaces the manual i = 0 / i += 1 counter
    dam_indices = [i for i, record in enumerate(damrecords)
                   if self.inside_box([xmin, ymin], [xmax, ymax],
                                      [record[long_index],
                                       record[lat_index]])]

    # write the data from the bbox to a new shapefile
    w = Writer(shapeType = 1)

    for field in sf.fields:
        w.field(*field)

    for i in dam_indices:
        point = sf.shape(i).points[0]
        w.point(*point)

        # decode any raw byte values so pyshp writes clean text
        rs = [value.decode('utf-8') if isinstance(value, bytes) else value
              for value in damrecords[i]]
        w.record(*rs)

    w.save(output)

    if verbose:
        print('successfully extracted NID dam locations to new file\n')
def export_data(self, query):
    """Export the geometry-bearing rows of *query* to a shapefile at the
    location derived from self.url.

    Each row's first DGeometry-labeled column supplies the shape; the
    remaining DString-labeled columns become attribute fields.  Returns
    silently when the path is unavailable or no row carries a geometry.

    Raises
    ------
    Exception
        If a geometry type is unrecognized or the geometries mix types.
    """

    def get_label(item):
        # Pull the label off an item's descriptor; None when no descriptor.
        # NOTE(review): item/descriptor/label are project types — shapes
        # inferred from usage below, not visible here.
        label = item.descriptor
        if label is None:
            return None
        return label.label

    def abbrev_to(name, chars, columns_abbrev):
        # Truncate *name* to *chars* characters, appending a numeric
        # suffix and retrying until it collides with no abbreviation
        # already recorded in columns_abbrev.
        if len(name) > chars:
            n = 1
            while True:
                name_new = name[:chars - len(str(n))] + str(n)
                if not name_new in columns_abbrev.values():
                    return name_new
                n += 1
        return name

    path = as_path(self.url, check_if_exists = False)
    if path is None:
        return

    # collect each row's geometry and remember which row it came from
    geometries = [] # [[coords, geometry_type], ...]
    row_idxs = [] # [row_idx, ...]
    for row_idx, row in enumerate(query):
        for column in row:
            label = get_label(row[column])
            # class-name check avoids importing the project type; safe on
            # None since "NoneType" never matches
            if label.__class__.__name__ == "DGeometry":
                geometries.append(label.coords)
                row_idxs.append(row_idx)
                break  # only the first geometry column per row is used
    if not row_idxs:
        return

    # shapefile (DBF) field names are limited to ~10 characters, so build
    # collision-free abbreviations for long column names
    columns_abbrev = {} # {column: column_abbrev, ...}; abbreviated column names
    for column in query.columns:
        column_abbrev = column
        if len(column_abbrev) > 10:
            if "." in column_abbrev:
                # dotted names keep both parts: 4 chars + "_" + 5 chars
                column_abbrev = column_abbrev.split(".")
                column_abbrev = "_".join([abbrev_to(column_abbrev[0], 4, columns_abbrev),
                                          abbrev_to(column_abbrev[1], 5, columns_abbrev)])
            else:
                column_abbrev = abbrev_to(column_abbrev, 10, columns_abbrev)
        column_abbrev = column_abbrev.replace(".", "_")
        columns_abbrev[column] = column_abbrev

    # map WKT-style geometry names to pyshp shape-type codes and require
    # every geometry in the export to share one type
    shapeType = -1
    shape_types = {
        "POINT": 1, "LINESTRING": 3, "POLYGON": 5, "MULTIPOINT": 8,
        "POINTZ": 11, "LINESTRINGZ": 13, "POLYGONZ": 15, "MULTIPOINTZ": 18,
        "POINTM": 21, "LINESTRINGM": 23, "POLYGONM": 25, "MULTIPOINTM": 28,
    }
    for _, geometry_type in geometries:
        if geometry_type not in shape_types:
            raise Exception("Unknown geometry type")
        if shapeType > -1:
            if shape_types[geometry_type] != shapeType:
                raise Exception("Geometries must be of the same type")
        else:
            shapeType = shape_types[geometry_type]

    sf = Writer(shapeType = shapeType)

    # decide the DBF field type per column: "N" (numeric) unless any value
    # forces a fallback to "C" (character); conv_order ranks the types so a
    # column only ever widens N -> C, never narrows
    types = {} # {column: type, ...}
    shp_types = {bool: "C", int: "N", float: "N", str: "C"}
    conv_order = ["N", "C"]
    for row in query:
        for column in row:
            label = get_label(row[column])
            if label.__class__.__name__ != "DString":
                continue
            # try_numeric presumably yields an int/float when the string
            # parses as a number — TODO confirm against DString
            value = label.try_numeric
            typ = type(value)
            typ = shp_types[typ] if typ in shp_types else "C"
            if (not column in types) or ((typ != types[column]) and (conv_order.index(typ) > conv_order.index(types[column]))):
                types[column] = typ

    for column in types:
        sf.field(columns_abbrev[column], fieldType = types[column], size = "128")

    # write one shape (and, when attribute fields exist, one record) per
    # collected geometry, pairing by position with its source row
    for i in range(len(geometries)):
        row = query[row_idxs[i]]
        coords = geometries[i][0]
        if shapeType in [1, 11, 21]: # point types
            sf.point(*coords[0], shapeType = shapeType)
        else:
            sf.poly(shapeType = shapeType, parts = [coords])
        if types:
            record = []
            for column in types:
                label = get_label(row[column])
                if label is not None:
                    label = label.value
                record.append(label)
            sf.record(*record)

    sf.save(path)