Example #1
def find_flowlines(gagefile, flowfile):
    """Determines the COMIDS of the flowlines in the flowline shapefile that 
    correspond to the USGS gages from the gage shapefile.
    """

    flowlines = Reader(flowfile, shapeType = 3)
    outlets   = Reader(gagefile, shapeType = 1)

    points  = [outlet.points[0] for outlet in outlets.shapes()]
    records = outlets.records()

    lines  = flowlines.shapes()

    # find the indices of closest flowline for each point

    indices = [closest_index(point, lines) for point in points]

    # make a dictionary linking the outlet site index numbers to the 
    # corresponding flowline comids

    comid_index = flowlines.fields.index(['COMID', 'N', 9, 0]) - 1

    comids = []
    for i in indices:
        if i is not None: comids.append(flowlines.record(i)[comid_index])
        else:             comids.append(None)

    return comids
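Example #1 depends on a `closest_index` helper that is not shown. A minimal sketch of what such a helper could look like, matching the call signature above (it compares only vertex-to-vertex distances, so it is an approximation, not the original implementation):
```
from math import hypot

def closest_index(point, lines):
    # hypothetical stand-in for the helper used above: index of the line
    # whose nearest vertex is closest to the point (None if lines is empty)
    best, best_d = None, float('inf')
    x, y = point
    for i, line in enumerate(lines):
        for px, py in line.points:
            d = hypot(px - x, py - y)
            if d < best_d:
                best, best_d = i, d
    return best
```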
Example #2
def to_geojson_features(shapefilepath):
    '''reads the given shape file ('.shp') and returns it as a list of geojson
    features of the form:
    ```
        {
            "type": "Feature",
            "geometry": {
                "type": "Point",
                "coordinates": [125.6, 10.1]
                },
            "properties": {
                "name": "Dinagat Islands"
            }
        }
    ```
    '''
    shp = Reader(shapefilepath)  # open the shapefile
    shapes = shp.shapes()  # get all the polygons (class shapefile._Shape)
    records = shp.records()
    fields = [field[0] for field in shp.fields[1:]]
    assert len(shapes) == len(records)
    # Reminder: geojson syntax: http://geojson.org/:
    return [
        {
            "type": "Feature",
            'geometry':
            mapping(shape(s)),  # https://stackoverflow.com/a/40631091
            'properties': dict(zip(fields, r))
        } for s, r in zip(shapes, records)
    ]
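A possible way to use `to_geojson_features` (the file names are illustrative; the function itself assumes `Reader` from pyshp and `shape`/`mapping` from shapely are imported):
```
import json

features = to_geojson_features('counties.shp')  # hypothetical input file
collection = {"type": "FeatureCollection", "features": features}
with open('counties.geojson', 'w') as f:
    json.dump(collection, f)
```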
Example #3
def __main__():
    shpdir = '/Users/Theo/' + \
        'Instituten-Groepen-Overleggen/HYGEA/Consult/2017/' + \
        'DEME-julianakanaal/REGIS/Limburg - REGIS II v2.2/shapes'
    # shpnm = 'steilrandstukken.shp'
    # shpnm = 'Steilrand.shp'
    shpnm = 'SteilrandGebieden.dbf'

    shapefileName = os.path.join(shpdir, shpnm)

    rdr   = Reader(shapefileName)

    fldNms = [p[0] for p in rdr.fields][1:]
    print(fldNms)

    kwargs = {'title': os.path.basename(shapefileName),
              'grid': True,
              'xticks': 1000.,
              'yticks': 1000.,
              'xlabel': 'x RD [m]',
              'ylabel': 'y RD [m]',
              'edgecolor': 'y',
              'facecolor' : 'r',
              'alpha': 0.5}

    plotshapes(rdr, **kwargs)
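`plotshapes` is a helper that is not shown here. A minimal matplotlib sketch accepting the keyword arguments used above (the tick values are interpreted as grid spacing; this is an assumption, not the original helper):
```
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.ticker import MultipleLocator

def plotshapes(rdr, title='', grid=True, xticks=None, yticks=None,
               xlabel='', ylabel='', edgecolor='k', facecolor='none',
               alpha=1.0):
    # hypothetical stand-in for the helper used above
    fig, ax = plt.subplots()
    for s in rdr.shapes():
        ax.add_patch(Polygon(s.points, closed=True, edgecolor=edgecolor,
                             facecolor=facecolor, alpha=alpha))
    ax.autoscale_view()
    if xticks: ax.xaxis.set_major_locator(MultipleLocator(xticks))
    if yticks: ax.yaxis.set_major_locator(MultipleLocator(yticks))
    ax.set(title=title, xlabel=xlabel, ylabel=ylabel, aspect='equal')
    ax.grid(grid)
    plt.show()
```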
Example #4
def get_coast_polygons(resolution):
    polymeta = []
    polybounds = []
    for level in [1, 2, 3, 5]:
        filename = os.path.join(GSHHS_DIR, 'GSHHS_shp/', resolution,
                                'GSHHS_{}_L{}'.format(resolution, level))
        print(filename)
        shf = Reader(filename)
        fields = shf.fields
        try:
            shaperecords = shf.shapeRecords()
        except Exception:
            continue
        for shprec in shaperecords:
            shp = shprec.shape
            rec = shprec.record
            parts = shp.parts.tolist()
            if parts != [0]:
                print('multipart polygon')
                raise SystemExit
            verts = shp.points
            lons, lats = list(zip(*verts))
            north = max(lats)
            south = min(lats)
            attdict = {}
            for r, key in zip(rec, fields[1:]):
                attdict[key[0]] = r
            area = attdict['area']
            id = attdict['id']
            polymeta.append([level, area, south, north, len(lons), id])
            b = np.empty((len(lons), 2), np.float32)
            b[:, 0] = lons
            b[:, 1] = lats
            if lsd is not None:
                b = quantize(b, lsd)
            polybounds.append(b)

        # Manual fix for incorrect Antarctica polygons at full resolution
        # This issue is only present in the shapefile version and may be fixed
        # in future versions of GSHHS!
        if resolution == 'f' and level == 5:
            i = [item[-1] for item in polymeta].index('4-E')
            coords = polybounds[i][2:-1, :]
            coords = np.vstack([coords, [180.0, -90.0],
                                [0.0, -90.0]]).astype(np.float32)
            polybounds[i] = coords
            polymeta[i][-2] = len(coords)

            j = [item[-1] for item in polymeta].index('4-W')
            coords = polybounds[j][3:, :]
            np.savetxt('coordinates.txt', coords)
            coords = np.vstack([
                coords, [0.0, coords[-1][1]], [0.0, -90.0], [-180.0, -90.0],
                coords[0]
            ]).astype(np.float32)

            polybounds[j] = coords
            polymeta[j][-2] = len(coords)

    return polybounds, polymeta
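Both this example and Example #16 reference module-level names `lsd` and `quantize` that are not shown. A plausible reconstruction, assuming the common least-significant-digit quantization trick (round coordinates to a power-of-two scale so the arrays compress better); the exact scheme is an assumption:
```
import numpy as np

lsd = 4  # least significant digit to keep; set to None to skip quantization

def quantize(data, least_significant_digit):
    # round data to a power-of-two scale that preserves the requested
    # decimal precision; quantized values then compress much better
    precision = 10.0 ** -least_significant_digit
    bits = np.ceil(np.log2(1.0 / precision))
    scale = 2.0 ** bits
    return np.around(scale * data) / scale
```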
Example #5
def main(shp):
    shp, _ = os.path.splitext(shp)
    with IO() as shpio, IO() as dbfio:  # Don't overwrite existing .shp, .dbf
        with Reader(shp) as r, Writer(shp=shpio, dbf=dbfio, shx=shp+'.shx') as w:
            w.fields = r.fields[1:]  # skip first deletion field
            for rec in r.iterShapeRecords():
                w.record(*rec.record)
                w.shape(rec.shape)
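`IO` is not defined in this snippet; it is presumably an alias for an in-memory buffer such as `io.BytesIO`, so the existing .shp and .dbf are written to throwaway buffers while only the regenerated .shx index lands on disk. A hedged reconstruction of the missing import plus a usage line:
```
from io import BytesIO as IO  # assumed alias for the IO() used above

main('roads.shp')  # hypothetical file: regenerates roads.shx only
```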
Example #6
def merge_shapes(
    inputfile,
    outputfile=None,
    overwrite=False,
    verbose=True,
    vverbose=False,
):
    """
    Merges all the shapes in a shapefile into a single shape.
    """

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose:
            print('combined watershed shapefile {} exists'.format(outputfile))
        return

    if verbose:
        print('combining shapes from {}\n'.format(inputfile) +
              'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType=5)

    try:

        combined = combine_shapes(r.shapes(), verbose=vverbose)

    except Exception:

        print('error: unable to combine shapes')
        raise

    # create the new file with the merged shapes

    w = Writer(shapeType=5)

    w.poly(shapeType=5, parts=[combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields:
        w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose:

        its = inputfile, outputfile
        print('successfully combined shapes from {} to {}\n'.format(*its))
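`combine_shapes` is an external helper that dissolves all the polygons into one outline. A rough substitute using shapely's `unary_union`, under the assumption that the union comes out as a single polygon (this is a sketch, not the original helper):
```
from shapely.geometry import shape
from shapely.ops import unary_union

def combine_shapes_shapely(shapes, verbose=False):
    # hypothetical replacement: dissolve pyshp polygons via their
    # __geo_interface__ and return the exterior ring as a point list
    merged = unary_union([shape(s.__geo_interface__) for s in shapes])
    if verbose: print('merged {} shapes'.format(len(shapes)))
    return list(merged.exterior.coords)  # assumes a single Polygon result
```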
Example #7
	def load(self):
		
		def _get_points(shape):
			
			points = shape.points
			if hasattr(shape, "z"):
				x, y = zip(*points)
				return list(zip(x, y, shape.z))
			return points
		
		self._columns = {}
		self._data = []
		
		path = as_path(self.url)
		if path is None:
			return False
		
		# get SRID
		srid = -1
		srid_vertical = -1
		# TODO try to discover SRID from .prj file
		
		# get geometries & data
		geo_column = "Geometry"
		sf = Reader(path)
		names = []
		for idx, name in enumerate(sf.fields[1:]):
			self._columns[idx + 1] = name[0]
			names.append(name[0])
		while geo_column in names:
			geo_column = geo_column + "_"
		
		shp_type = sf.shape(0).shapeType
		shapes_wkt = { # WKT formatting for geometric shapes
			1: "POINT(%s)",
			3: "LINESTRING(%s)",
			5: "POLYGON((%s))",
			8: "MULTIPOINT(%s)",
			11: "POINTZ(%s)",
			13: "LINESTRINGZ(%s)",
			15: "POLYGONZ((%s))",
			18: "MULTIPOINTZ(%s)",
			21: "POINTM(%s)",
			23: "LINESTRINGM(%s)",
			25: "POLYGONM((%s))",
			28: "MULTIPOINTM(%s)",
		}
		if shp_type not in shapes_wkt:
			raise ValueError("Unrecognized shapefile type")
		
		geometries = [
			shapes_wkt[shp_type] % ", ".join(
				" ".join(str(p) for p in point)
				for point in _get_points(shape)
			)
			for shape in sf.shapes()
		]
		# geometries = [wkt definition, ...] in order of records
		for i, record in enumerate(sf.records()):
			self._data.append(dict([(idx, DString(str(record[idx - 1]).strip())) for idx in self._columns]))
			self._data[-1][0] = DGeometry(geometries[i], srid = srid, srid_vertical = srid_vertical)
		self._columns[0] = geo_column
		
		return True
Example #8
    def extract_catchments(
        self,
        source,
        destination,
        flowlinefile,
        verbose=True,
    ):
        """
        Extracts the catchments from the source data file to the destination
        using the list of comids for the query.
        """

        # make a list of the comids

        comids = self.get_comids(flowlinefile)

        # open the catchment shapefile

        if verbose: print('reading the catchment shapefile\n')

        shapefile = Reader(source)

        # get the index of the feature id, which links to the flowline comid

        featureid_index = shapefile.fields.index(['FEATUREID', 'N', 9, 0]) - 1

        # go through the comids from the flowlines and add the corresponding
        # catchment to the catchment list

        if verbose: print('searching the catchments in the watershed\n')

        records = shapefile.records()
        indices = []

        for i, record in enumerate(records):
            if record[featureid_index] in comids: indices.append(i)

        if len(indices) == 0:
            raise RuntimeError('query returned no values')

        # create the new shapefile

        if verbose: print('writing the new catchment shapefile\n')

        w = Writer()

        for field in shapefile.fields:
            w.field(*field)

        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType=5, parts=[shape.points])
            w.record(*records[i])

        w.save(destination)
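Note that Examples #6, #8, and #9 use the pyshp 1.x writer API (`Writer()` with no target, `w.poly(parts=...)`, `w.save(...)`), which was removed in pyshp 2.x. A sketch of the same write loop against the 2.x API, wrapped in a hypothetical helper so it is self-contained:
```
from shapefile import Writer  # pyshp 2.x

def write_selected(reader, records, indices, destination):
    # same write step as above, but the 2.x Writer takes the target path
    # up front and is closed by the context manager
    with Writer(destination, shapeType=5) as w:
        for field in reader.fields[1:]:  # skip the DeletionFlag field
            w.field(*field)
        for i in indices:
            w.shape(reader.shape(i))  # copy the geometry as-is
            w.record(*records[i])
```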
Example #9
    def extract_flowlines(self, source, destination, HUC8, verbose = True):
        """Extracts flowlines from the source datafile to the destination using
        the HUC8 for the query."""

        # open the flowline file
    
        if verbose: print('reading the flowline file\n')
    
        shapefile = Reader(source, shapeType = 3)
        records   = shapefile.records()
    
        # figure out which field codes are the Reach code and comid
    
        reach_index = shapefile.fields.index(['REACHCODE', 'C', 14, 0]) - 1
    
        # go through the reach indices, add add them to the list of flowlines
        # if in the watershed; also make a list of the corresponding comids
    
        if verbose: print('searching for flowlines in the watershed\n')
    
        indices = []
       
        for i, record in enumerate(records):
            if record[reach_index][:8] == HUC8: indices.append(i)

        if len(indices) == 0:
            raise RuntimeError('query returned no values')
    
        # write the data from the HUC8 to a new shapefile
    
        w = Writer(shapeType = 3)
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 3, parts = [shape.points])
    
            record = records[i]
    
            # little work around for blank GNIS_ID and GNIS_NAME values
    
            if isinstance(record[3], bytes):
                record[3] = record[3].decode('utf-8')
            if isinstance(record[4], bytes):
                record[4] = record[4].decode('utf-8')
    
            w.record(*record)
    
        w.save(destination)
    
        if verbose: 
            l = len(indices)
            print('queried {} flowlines from original shapefile\n'.format(l))
Example #10
def pull_data():
    roads = Reader(shproot + "roads_wgs.shp")
    print("HEADER:", "\n".join(str(f) for f in roads.fields), sep="\n")
    shaperecs = roads.shapeRecords()

    with open(projectroot + "foutkm.csv") as infl:
        lines = [line.split("\t") for line in infl]  # read but not used here

    return shaperecs
Example #11
    def set_metadata(
        self,
        gagefile,
    ):
        """
        Opens the gage file with the station metadata.
        """

        # metadata for stations

        self.gages = []
        self.day1s = []
        self.dayns = []
        self.drains = []
        self.states = []
        self.sites = []
        self.nwiss = []
        self.aves = []
        self.names = []

        gagereader = Reader(gagefile, shapeType=1)

        # get the fields with pertinent info

        day1_index = gagereader.fields.index(['DAY1', 'N', 19, 0]) - 1
        dayn_index = gagereader.fields.index(['DAYN', 'N', 19, 0]) - 1
        drain_index = gagereader.fields.index(['DA_SQ_MILE', 'N', 19, 2]) - 1
        HUC8_index = gagereader.fields.index(['HUC', 'C', 8, 0]) - 1
        state_index = gagereader.fields.index(['STATE', 'C', 2, 0]) - 1
        site_index = gagereader.fields.index(['SITE_NO', 'C', 15, 0]) - 1
        nwis_index = gagereader.fields.index(['NWISWEB', 'C', 75, 0]) - 1
        ave_index = gagereader.fields.index(['AVE', 'N', 19, 3]) - 1
        name_index = gagereader.fields.index(['STATION_NM', 'C', 60, 0]) - 1

        # iterate through the records

        for r in gagereader.records():

            gage = r[site_index]
            day1 = r[day1_index]
            dayn = r[dayn_index]
            drain = r[drain_index]
            state = r[state_index]
            nwis = r[nwis_index]
            ave = r[ave_index]
            name = r[name_index]
            site = r[site_index]

            self.gages.append(gage)
            self.day1s.append(day1)
            self.dayns.append(dayn)
            self.drains.append(drain)
            self.states.append(state)
            self.sites.append(site)
            self.nwiss.append(nwis)
            self.aves.append(ave)
            self.names.append(name)
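The `fields.index([...]) - 1` lookups above break if the field widths or precisions differ from the hard-coded specs. A small name-based lookup is more robust (a sketch; `field_index` is not part of the original code):
```
def field_index(reader, name):
    # index of a field by name, offset past the DeletionFlag entry,
    # usable as: day1_index = field_index(gagereader, 'DAY1')
    names = [f[0] for f in reader.fields[1:]]
    return names.index(name)
```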
Example #12
def extract_nsrdb(directory,
                  HUC8,
                  start,
                  end,
                  space=0.1,
                  plot=True,
                  verbose=True,
                  vverbose=False):
    """Makes pickled instances of the GageStation class for all the gages
    meeting the calibration criteria for an 8-digit watershed."""

    if verbose: print('\nextracting solar radiation data from NREL\n')

    # paths for the watershed shapefiles

    boundaryfile = '{0}/{1}/{1}boundaries'.format(directory, HUC8)
    solarfile = '{0}/{1}/{1}solarstations'.format(directory, HUC8)

    # make a folder for the files

    d = '{0}/{1}/NSRDB'.format(directory, HUC8)
    if not os.path.isdir(d): os.mkdir(d)

    boundaryreader = Reader(boundaryfile)

    stations = []
    while len(stations) == 0:

        bbox = get_boundaries(boundaryreader.shapes(), space=space)
        stations = find_nsrdb(bbox, dates=(start, end))
        space += 0.2

    # download the data

    print('')
    for station in stations:

        if not os.path.isfile('{}/{}'.format(d, station.usaf)):

            station.download_data(d, dates=(start, end))

    # plot it up

    from pyhspf.preprocessing.climateplots import plot_nsrdb

    for station in stations:

        p = '{}/{}'.format(d, station.usaf)
        if not os.path.isfile(p + '.png'):

            with open(p, 'rb') as f:
                s = pickle.load(f)

            try:
                plot_nsrdb(s, start, end, output=p)
            except Exception:
                print('unable to plot', s.station)
Example #13
def extract_precip3240(directory,
                       HUC8,
                       start,
                       end,
                       NCDC='ftp://ftp.ncdc.noaa.gov/pub/data',
                       clean=False,
                       space=0.2,
                       verbose=True):
    """Makes a point shapefile of the stations from a csv file of hourly 
    precipitation data from NCDC within the bounding box of the watershed."""

    if os.name == 'nt': decompress = decompress7z
    else: decompress = decompresszcat

    d = '{}/{}/precip3240'.format(directory, HUC8)
    if not os.path.isdir(d): os.mkdir(d)

    # open up the bounding box for the watershed

    boundaryfile = '{0}/{1}/{1}boundaries'.format(directory, HUC8)

    boundaryreader = Reader(boundaryfile)

    bbox = get_boundaries(boundaryreader.shapes(), space=space)

    # find the precipitation stations in the bounding box

    stations = find_precip3240(bbox, verbose=verbose)

    if verbose: print('')

    # make a list of all the states since that's how the NCDC data are stored

    states = list(set([s.code for s in stations]))

    # download the state data for each year

    for state in states:
        download_state_precip3240(state, d, verbose=verbose)

    archives = [
        '{}/{}'.format(d, a) for a in os.listdir(d) if a[-6:] == '.tar.Z'
    ]

    for a in archives:

        # decompress the archive

        if not os.path.isfile(a[:-2]):
            decompress(a, d)
            if verbose: print('')

    # import the data

    for station in stations:
        station.import_data(d, start, end)
Example #14
def extract_raw(source, destination, HUC8, plot=True, save=True, verbose=True):
    """Extracts the grid data for the HUC8."""

    # make a new directory for the HUC8

    d = '{}/{}/NRCM'.format(destination, HUC8)

    if not os.path.isdir(d): os.mkdir(d)

    # make a "raw directory" for the unaltered info

    raw = '{}/raw'.format(d)

    if not os.path.isdir(raw):
        os.mkdir(raw)
        if verbose: print('extracting NRCM predictions...\n')

    # use the boundary file to find the bounding box for the grid points

    boundaryfile = '{0}/{1}/{1}boundaries'.format(destination, HUC8)
    subbasinfile = '{0}/{1}/{1}subbasins'.format(destination, HUC8)
    space = 0.1

    sf = Reader(boundaryfile)

    bbox = get_boundaries(sf.shapes(), space=space)

    xmin, ymin, xmax, ymax = bbox

    if verbose and not os.path.isdir(raw):
        print('bounding box =', xmin, ymin, xmax, ymax, '\n')

    lats, lons = [], []
    for f in os.listdir(source):
        i = f.index('_')
        lon = float(f[:i])
        lat = float(f[i + 1:])

        if inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
            lats.append(lat)
            lons.append(lon)

            if not os.path.isfile('{}/{}'.format(raw, f)):
                shutil.copy('{}/{}'.format(source, f), '{}/{}'.format(raw, f))

    if plot:
        if save: output = '{}/gridpoints'.format(d)
        else: output = None
        if output is None or not os.path.isfile(output):
            plot_NRCM(lons,
                      lats,
                      bfile=boundaryfile,
                      sfile=subbasinfile,
                      output=output,
                      show=False)
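`inside_box` is assumed from elsewhere; a minimal sketch consistent with how it is called above:
```
def inside_box(p1, p2, p3):
    # hypothetical helper: True if point p3 = [x, y] falls inside the
    # axis-aligned box with corners p1 and p2
    return (min(p1[0], p2[0]) <= p3[0] <= max(p1[0], p2[0]) and
            min(p1[1], p2[1]) <= p3[1] <= max(p1[1], p2[1]))
```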
Example #15
def plot_NRCM(lons,
              lats,
              bfile=None,
              sfile=None,
              space=0.05,
              show=False,
              output=None):

    fig = pyplot.figure()

    sub = fig.add_subplot(111, aspect='equal')
    sub.set_title('Nested Regional Climate Model Grid Points')
    sub.scatter(lons, lats, marker='+', c='r', s=40)

    if bfile is not None:

        sf = Reader(bfile)
        boundary = sf.shape(0).points
        sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width=1.2))

    if sfile is not None:

        sf = Reader(sfile)

        for s in sf.shapes():
            boundary = s.points
            sub.add_patch(make_patch(boundary, (1, 0, 0, 0), width=0.2))

    sub.set_xlabel('Longitude, Decimal Degrees', size=13)
    sub.set_ylabel('Latitude, Decimal Degrees', size=13)

    xmin, ymin, xmax, ymax = get_boundaries(sf.shapes(), space=space)

    pyplot.xlim([xmin, xmax])
    pyplot.ylim([ymin, ymax])

    if output is not None: pyplot.savefig(output)

    if show: pyplot.show()

    pyplot.clf()
    pyplot.close()
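`make_patch` is another unshown helper; a plausible matplotlib version matching the calls above (boundary point list, RGBA facecolor, line width), offered as an assumption:
```
from matplotlib.patches import Polygon

def make_patch(points, facecolor, width=1.0):
    # hypothetical stand-in: wrap a boundary point list in a closed
    # matplotlib Polygon patch
    return Polygon(points, closed=True, facecolor=facecolor,
                   edgecolor='black', linewidth=width)
```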
Example #16
def get_wdb_boundaries(resolution, level, rivers=False):
    polymeta = []
    polybounds = []
    if rivers:
        filename = os.path.join(
            GSHHS_DIR, 'WDBII_shp', resolution,
            'WDBII_river_{}_L{:02}'.format(resolution, level))
    else:
        filename = os.path.join(
            GSHHS_DIR, 'WDBII_shp', resolution,
            'WDBII_border_{}_L{}'.format(resolution, level))
    print(filename)
    shf = Reader(filename)
    fields = shf.fields
    for shprec in shf.shapeRecords():
        shp = shprec.shape
        rec = shprec.record
        parts = shp.parts.tolist()
        if parts != [0]:
            print('multipart polygon')
            raise SystemExit

        verts = shp.points
        # Detect degenerate lines that are actually points...
        if len(verts) == 2 and np.allclose(verts[0], verts[1]):
            print('Skipping degenerate line...')
            continue

        lons, lats = list(zip(*verts))
        north = max(lats)
        south = min(lats)
        attdict = {}
        for r, key in zip(rec, fields[1:]):
            attdict[key[0]] = r
        area = -1
        poly_id = attdict['id']
        b = np.empty((len(lons), 2), np.float32)
        b[:, 0] = lons
        b[:, 1] = lats

        if not rivers:
            b = interpolate_long_segments(b, resolution)

        if lsd is not None:
            b = quantize(b, lsd)

        polymeta.append([-1, -1, south, north, len(b), poly_id])
        polybounds.append(b)

    return polybounds, polymeta
Example #17
    def get_comids(self, flowlinefile):
        """Finds the comids from the flowline file."""

        # open the file

        shapefile = Reader(flowlinefile)

        # find the index of the comids

        comid_index = shapefile.fields.index(['COMID', 'N', 9,  0]) - 1

        # make a list of the comids

        comids = [r[comid_index] for r in shapefile.records()]

        return comids
Example #18
def load():
    # Determine paths.
    file_dir = os.path.dirname(os.path.abspath(__file__))
    locs_path = os.path.join(file_dir, 'data', 'IDSTA.shp')
    data_path = os.path.join(file_dir, 'data', 'HRtemp2006.txt')

    # Load locations.
    sf = Reader(locs_path)
    names, lons, lats = [], [], []
    for sr in sf.shapeRecords():
        name = sr.record.as_dict()['IDT_AK']
        lon, lat = sr.shape.points[0]
        names.append(name)
        lons.append(lon)
        lats.append(lat)
    locs = pd.DataFrame({'lon': lons, 'lat': lats},
                        index=pd.Index(names, name='node'))

    # Read data.
    df = pd.read_csv(data_path, sep='\t')

    # Rename things.
    df = pd.DataFrame({
        'node': df['IDT_AK'],
        'date': df['DATE'],
        'temp': df['MDTEMP']
    })

    # Make columns nodes.
    df = df.set_index(['date', 'node']).unstack('node')['temp']

    # Drop outputs with missing values, which are only a few.
    df = df.dropna(axis=1)

    # Parse dates and convert to day in the year 2006.
    xs = [datetime.datetime.strptime(x, '%Y-%m-%d') for x in df.index]
    start = datetime.datetime(year=2006, month=1, day=1)
    df['day'] = [(x - start).total_seconds() / 3600 / 24 + 1 for x in xs]
    df = df.set_index('day').sort_index()

    # Filter locations by kept nodes.
    locs = locs.reindex(df.columns, axis=0)

    return locs, df
Example #19
    def extract_shapefile(self, shapefile, output):
        """Extracts the dams within the bounding box of the shapefile."""

        if not os.path.isfile(output + '.shp'):

            r = Reader(shapefile)

            bboxes = [r.shape(i).bbox for i in range(r.numRecords)]

            xmin = min([w for w, x, y, z in bboxes])
            ymin = min([x for w, x, y, z in bboxes])
            xmax = max([y for w, x, y, z in bboxes])
            ymax = max([z for w, x, y, z in bboxes])

            self.extract_bbox([xmin, ymin, xmax, ymax], output)

        else:
            print('dam shapefile exists\n')
Example #20
def extraction_poste(chemin):
    '''Source: https://datanova.legroupe.laposte.fr/explore/dataset/laposte_hexasmal/?disjunctive.code_commune_insee&disjunctive.nom_de_la_commune&disjunctive.code_postal&disjunctive.libell_d_acheminement&disjunctive.ligne_5
    File name: laposte_hexasmal
    Data:
       commune name
       INSEE code
       locality (lieu-dit) detail
       postal code of the commune
       delivery label (libellé d'acheminement)
       lieu-dit
    '''
    sf = Reader(chemin)
    shapeRecs = sf.shapeRecords()
    codes_postaux = []
    for com in shapeRecs:
        info = com.record
        codes_postaux.append(info)
    return codes_postaux
Example #21
    def extract_shapefile(self, shapefile, output):
        """Extracts the dams within the bounding box of the shapefile."""

        if not os.path.isfile(output + '.shp'):

            if os.path.isfile(shapefile + '.shp'):

                r = Reader(shapefile)

            else:

                raise FileNotFoundError(
                    'shapefile {} does not exist'.format(shapefile))

            self.extract_bbox(r.bbox, output)

        else:
            print('dam shapefile {} exists\n'.format(output))
Example #22
def extraction_geofla_commune(chemin):
    sf = Reader(chemin)
    shapeRecs = sf.shapeRecords()
    liste_communes = []
    for com in shapeRecs:
        info = com.record
        insee_com = info[2]
        nom_com_maj = info[3]
        nom_com_min = info[3]
        if b"Capitale d'\xe9tat" == info[4]:
            status = "capitale"
        elif b"Pr\xe9fecture de d\xe9partement" == info[4]:
            status = "prefecture_departement"
        elif "Commune simple" == info[4]:
            status = "commune_simple"
        elif b'Sous-pr\xe9fecture' == info[4]:
            status = "sous-prefecture"
        elif b'Pr\xe9fecture de r\xe9gion' == info[4]:
            status = "prefecture_region"
        x_chf_lieu, y_chf_lieu =  transform(lambert_93, wgs_84, info[5], info[6])
        x_centroid, y_centroid =  transform(lambert_93, wgs_84, info[7], info[8])
        z_moyen = info[9]
        superficie = info[10]
        population = info[11]
        code_arr = info[12]
        code_dep = info[13]
        code_reg = info[15]
        limite = conversion_lambert93_wgs84(com.shape.points)
        limite_com = creer_commune_json(info, limite, [y_centroid, x_centroid])
        commune = {"insee_com" : insee_com, "nom_com_maj" : nom_com_maj,\
                   "nom_com_min" : nom_com_min, "status" : status,\
                   "x_chf_lieu" : x_chf_lieu, "y_chf_lieu" : y_chf_lieu,\
                   "x_centroid" : x_centroid, "y_centroid" : y_centroid,\
                   "z_min" : z_moyen, "z_max" : z_moyen,\
                   "z_moyen" : z_moyen, "superficie" : superficie,\
                   "population" : population, "code_arr" : code_arr,\
                   "code_dep" : code_dep, "code_reg" : code_reg,\
                   "limite_com" : limite_com, "code_postal" : insee_com}
        #print(commune)
        liste_communes.append(commune)
    return liste_communes
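The `transform`, `lambert_93`, and `wgs_84` names used above, as well as the `conversion_lambert93_wgs84` helper, presumably come from pyproj. A sketch with the legacy pyproj 1.x-style API (EPSG:2154 is the French Lambert-93 grid; the helper body is an assumption):
```
from pyproj import Proj, transform

lambert_93 = Proj(init='epsg:2154')  # French Lambert-93
wgs_84 = Proj(init='epsg:4326')      # WGS84 lon/lat

def conversion_lambert93_wgs84(points):
    # hypothetical helper: reproject a boundary point list
    return [transform(lambert_93, wgs_84, x, y) for x, y in points]
```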
Example #23
def extraction_shp_departement(chemin):
    '''Structure of info_departement
     0  :   department number
     1  :   department name in upper case
     2  :   geographic code of the prefecture
     3  :   region code
     4  :   boundary in WGS84 coordinates'''
    sf = Reader(chemin)
    shapeRecs = sf.shapeRecords()
    info_departement = []
    for dep in shapeRecs:
        info = dep.record
        limite = conversion_lambert93_wgs84_dep(dep.shape.points)
        departement_json = creer_departement_json(info, limite)
        info_departement.append([info[1], info[2], info[3], info[9],\
            departement_json])
    return info_departement
Example #24
    def find_NED(self, catchmentfile):
        """Parses the elevation rasters to find the one where the HUC8 is
        located."""

        shapefile = Reader(catchmentfile)

        f = None
        for nedfile in self.nedfiles:

            t, v = get_raster_table(nedfile, shapefile.bbox, 'int32',
                                    quiet = True)

            if t is not None: 
                f = nedfile
                break

        if f is not None: 
            return f
        else:
            raise FileNotFoundError('unable to find NED file')
Example #25
def trim_shapefile(
    in_path: Union[Path, str],
    join_on: str,
    include: list,
    out_path: Union[Path, str, None] = None,
) -> Union[Path, str]:
    """Trims a shapefile to only include shapes that match the given criteria.

    Shapes will be discarded unless their 'join_on' property is contained in the
    'include' list.
    """

    # Resolve the shapefile path (allows in_path to point to directory with same
    # name as nested shapefile)
    in_path = resolve_shapefile_path(in_path)

    # Construct new name if it was not provided
    if out_path is None:
        out_path = in_path.with_name(f"{in_path.stem}_trimmed{in_path.suffix}")

    with Reader(str(in_path)) as r, Writer(str(out_path)) as w:

        w.fields = r.fields[1:]  # don't copy deletion field

        if join_on not in [f[0] for f in w.fields]:
            raise ValueError(f"'join_on'={join_on} not in shapefile fields: {w.fields}")

        # Copy features if they match the criteria
        for feature in r.iterShapeRecords():
            if feature.record[join_on] in include:
                w.record(*feature.record)
                w.shape(feature.shape)

    # PyShp doesn't manage .prj file, must copy manually.
    in_prj = in_path.with_suffix(".prj")
    if in_prj.exists():
        out_prj = out_path.with_suffix(".prj")
        shutil.copy(in_prj, out_prj)

    return out_path
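A possible call (the file, field name, and values are illustrative):
```
# keep only the listed states from a hypothetical boundaries shapefile
trimmed = trim_shapefile('states.shp', join_on='STATE_ABBR',
                         include=['CA', 'OR'])
print(trimmed)  # .../states_trimmed.shp
```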
Example #26
def extraction_geofla_departement(chemin):
    sf = Reader(chemin)
    shapeRecs = sf.shapeRecords()
    liste_departements = []
    for dep in shapeRecs:
        info = dep.record
        code_dep = info[1]
        nom_dep_maj = info[2]
        nom_dep_min = info[2]
        numero_insee_prefecture = info[1] + info[3] 
        x_centroid, y_centroid = transform(lambert_93, wgs_84, info[7], info[8])
        code_reg = info[9]
        limite = conversion_lambert93_wgs84(dep.shape.points)
        limite_dep = creer_departement_json(info, limite, [y_centroid, x_centroid])
        departement = {"code_dep" : code_dep, "nom_dep_maj": nom_dep_maj,\
                       "nom_dep_min" : nom_dep_min, "numero_insee_prefecture" : numero_insee_prefecture,\
                       "x_centroid" : x_centroid, "y_centroid" : y_centroid,\
                       "code_reg" : code_reg, "limite_dep" : limite_dep}
        liste_departements.append(departement)
    return liste_departements
Example #27
    def get_boundaries(
        self,
        bbox=None,
        shapefile=None,
        space=0,
    ):
        """Gets the boundaries for the plot."""

        if bbox is not None: boundaries = [x for x in bbox]
        elif shapefile is not None:
            r = Reader(shapefile)
            boundaries = [b for b in r.bbox]
        else:
            raise ValueError('no information provided: pass bbox or shapefile')

        xmin = boundaries[0] - (boundaries[2] - boundaries[0]) * space
        ymin = boundaries[1] - (boundaries[3] - boundaries[1]) * space
        xmax = boundaries[2] + (boundaries[2] - boundaries[0]) * space
        ymax = boundaries[3] + (boundaries[3] - boundaries[1]) * space

        return xmin, ymin, xmax, ymax
Example #28
    def extract_shapefile(
        self,
        shapefile,
        directory,
        space=0.05,
    ):
        """Extracts the cropland data for the bounding box of the shapefile."""

        if not os.path.isdir(directory):
            raise NotADirectoryError(
                'specified output directory {} does not exist'.format(directory))

        r = Reader(shapefile)

        xmin, ymin, xmax, ymax = r.bbox

        # adjust to make the map just larger than the extents

        xmin, xmax = xmin - space * (xmax - xmin), xmax + space * (xmax - xmin)
        ymin, ymax = ymin - space * (ymax - ymin), ymax + space * (ymax - ymin)

        self.extract_bbox((xmin, ymin, xmax, ymax), directory)
Example #29
def make_timeseries(directory, HUC8, start, end, evapstations = None, 
                    plot = True):
    """Makes an hourly timeseries of the reference evapotranspiration using
    the ASCE hourly Penman-Monteith Equation."""

    nrcm = '{}/{}/NRCM'.format(directory, HUC8)

    # start and end datetime instances

    s = datetime.datetime(start, 1, 1)
    e = datetime.datetime(end,   1, 1)

    # average the time series together from the NRCM simulation

    average_timeseries(nrcm)

    # open the watershed info to use to make subbasin precipitation

    watershedfile = '{}/{}/watershed'.format(directory, HUC8)

    with open(watershedfile, 'rb') as f: watershed = pickle.load(f)

    make_precipitation(watershed.subbasins, nrcm)

    # convert temperature and humidity to dewpoint

    make_dewpoint('{}/{}/NRCM/averages'.format(directory, HUC8))

    # open the 3-hr temperature, solar, and dewpoint, and daily wind files

    tempfile  = '{}/averages/average_temperature'.format(nrcm)
    solarfile = '{}/averages/average_solar'.format(nrcm)
    dewfile   = '{}/averages/average_dewpoint'.format(nrcm)
    windfile  = '{}/averages/average_wind'.format(nrcm)

    # watershed timeseries

    output = '{}/watershedtimeseries'.format(nrcm)

    if not os.path.isdir(output): os.mkdir(output)

    hourlytemp  = '{}/hourlytemperature'.format(output)
    hourlysolar = '{}/hourlysolar'.format(output)
    dailydew    = '{}/dewpoint'.format(output)
    dailywind   = '{}/wind'.format(output)
    hourlyRET   = '{}/hourlyRET'.format(output)
    hourlyPETs  = '{}/hourlyPETs'.format(output)
   
    if not os.path.isfile(hourlyRET):

        print('calculating an hourly time series for the reference ET...\n')

        # open the bounding box and get the mean lat, lon, and elevation

        f  = '{0}/{1}/{1}boundaries'.format(directory, HUC8)
        sh = Reader(f)

        record = sh.record(0)
        lon, lat, elev = record[-3:]

        with open(windfile,  'rb') as f: ts, Ws   = zip(*pickle.load(f))
        with open(tempfile,  'rb') as f: ts, Ts   = zip(*pickle.load(f))
        with open(solarfile, 'rb') as f: ts, Ss   = zip(*pickle.load(f))
        with open(dewfile,   'rb') as f: ts, dews = zip(*pickle.load(f))

        # dump the daily series

        with open(dailydew,  'wb') as f: 
            pickle.dump((s, 1440, list(dews)), f)
        with open(dailywind, 'wb') as f: 
            pickle.dump((s, 1440, list(Ws)), f)

        # dump all the hourly series and convert the solar radiation 
        # from Watts/m2 to MJ/hour/m2

        temp  = [T for T in Ts for i in range(3)]
        solar = [S for S in Ss for i in range(3)]

        with open(hourlysolar, 'wb') as f: pickle.dump((s, 60, solar), f)
        with open(hourlytemp,  'wb') as f: pickle.dump((s, 60, temp),  f)

        # convert to hourly numpy arrays

        temp     = numpy.array(temp)
        solar    = numpy.array(solar) * 3600 / 10**6
        wind     = numpy.array([w for w in Ws   for i in range(24)])
        dewpoint = numpy.array([T for T in dews for i in range(24)])

        # dates

        dates = [s + i * datetime.timedelta(hours = 1) 
                 for i in range(len(solar))]
 
        RET = penman_hourly(lat, lon, elev, dates, temp, dewpoint, solar, wind,
                            verbose = False)

        # dump the timeseries

        with open(hourlyRET, 'wb')   as f: pickle.dump((s, 60, RET), f)

    if not os.path.isfile(hourlyRET + '.png'):
        with open('{}/hourlytemperature'.format(output), 'rb') as f: 
            s, t, temp = pickle.load(f)
        with open('{}/dewpoint'.format(output), 'rb') as f: 
            s, t, dewpoint = pickle.load(f)
        with open('{}/wind'.format(output), 'rb') as f: 
            s, t, wind = pickle.load(f)
        with open('{}/hourlysolar'.format(output), 'rb') as f: 
            s, t, solar = pickle.load(f)
        with open(hourlyRET, 'rb') as f: 
            s, t, hRET = pickle.load(f)

        # Watts/m2 to kW hr/m2

        solar = [s * 0.024 for s in solar]

        if evapstations is not None:
            with open(evapstations, 'rb') as f: evaporations = pickle.load(f)
        else:
            evaporations = {}

        plot_hourlyET(HUC8, s, e, evaporations, [hRET], temp,
                      dewpoint, wind, solar, fill = True, 
                      colors = ['green', 'yellow', 'orange', 'red'],
                      output = hourlyRET)

    if not os.path.isfile(hourlyPETs):
        calculate_cropPET(directory, HUC8, s, e, output = output,
                          evaporations = False)
Example #30
def readshapefile(shapefile, default_encoding='utf-8'):
    """
    """
    import shapefile as shp
    from shapefile import Reader
    shp.default_encoding = default_encoding
    if not os.path.exists('%s.shp' % shapefile):
        raise IOError('cannot locate %s.shp' % shapefile)
    if not os.path.exists('%s.shx' % shapefile):
        raise IOError('cannot locate %s.shx' % shapefile)
    if not os.path.exists('%s.dbf' % shapefile):
        raise IOError('cannot locate %s.dbf' % shapefile)
    # open shapefile, read vertices for each object, convert
    # to map projection coordinates (only works for 2D shape types).
    try:
        shf = Reader(shapefile, encoding=default_encoding)
    except Exception:
        raise IOError('error reading shapefile %s.shp' % shapefile)
    fields = shf.fields
    coords = []
    attributes = []

    shptype = shf.shape(0).shapeType
    bbox = shf.bbox.tolist()
    info = (shf.numRecords, shptype, bbox[0:2] + [0., 0.], bbox[2:] + [0., 0.])
    npoly = 0
    for shprec in shf.shapeRecords():
        shp = shprec.shape
        rec = shprec.record
        npoly = npoly + 1
        if shptype != shp.shapeType:
            print(shapefile)
            raise ValueError(
                'readshapefile can only handle a single shape type per file')
        if shptype not in [1, 3, 5, 8]:
            raise ValueError('readshapefile can only handle 2D shape types')
        verts = shp.points
        if shptype in [1, 8]:  # a Point or MultiPoint shape.
            lons, lats = list(zip(*verts))
            if max(lons) > 721. or min(lons) < -721. or max(
                    lats) > 90.01 or min(lats) < -90.01:
                raise ValueError("经纬度范围超出可能值范围")
            # if latitude is slightly greater than 90, truncate to 90
            lats = [max(min(lat, 90.0), -90.0) for lat in lats]
            if len(verts) > 1:  # MultiPoint
                x = lons
                y = lats
                coords.append(list(zip(x, y)))
            else:  # single Point
                x = lons[0]
                y = lats[0]
                coords.append((x, y))
            attdict = {}
            for r, key in zip(rec, fields[1:]):
                attdict[key[0]] = r
            attributes.append(attdict)
        else:  # a Polyline or Polygon shape.
            parts = shp.parts.tolist()
            ringnum = 0
            for indx1, indx2 in zip(parts, parts[1:] + [len(verts)]):
                ringnum = ringnum + 1
                lons, lats = list(zip(*verts[indx1:indx2]))
                if max(lons) > 721. or min(lons) < -721. or max(
                        lats) > 90.01 or min(lats) < -90.01:
                    raise ValueError("经纬度范围超出可能值范围")
                # if latitude is slightly greater than 90, truncate to 90
                lats = [max(min(lat, 90.0), -90.0) for lat in lats]
                #x, y = mp.projtran(lons, lats)  # map projection would go here
                x = lons
                y = lats
                coords.append(list(zip(x, y)))
                attdict = {}
                for r, key in zip(rec, fields[1:]):
                    attdict[key[0]] = r
                # add information about ring number to dictionary.
                attdict['RINGNUM'] = ringnum
                attdict['SHAPENUM'] = npoly
                attributes.append(attdict)
    # the original basemap version also draws the polyline/polygon boundaries
    # with a LineCollection; here the parsed data are returned instead
    return info, coords, attributes
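A quick usage sketch (the basename is passed without extension, matching the existence checks at the top; the file name is illustrative):
```
info, coords, attributes = readshapefile('roads_wgs')
print(len(coords), 'vertex lists;', len(attributes), 'attribute dicts')
```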