Example #1
def main(args):
    'Runs calculation chain'
    try:
        action = args[1]
        config = args[2]
    except IndexError:
        action = config = None

    inm_log = InmarsatLog.from_csv('data', 'inmarsat-su-log-redacted.csv')

    time_step = timedelta(seconds=10)
    bin_log = inm_log.bin_data(time_step)
    traj_time_step = time_step / 2

    traj = Trajectory.from_csv('data',
                               acars='acars.csv',
                               adsb='all-combined.csv',
                               radar='route.csv')
    int_traj = traj.int_data(bin_log.data[0].time - traj_time_step,
                             traj_time_step)

    r_btos = RadialDistance.from_bto(bin_log.data)
    r_known = RadialDistance.from_traj(int_traj.data)
    r_flight = r_known.append(r_btos).take_after(T0['take-off'])

    trend_times = [(2014, 3, 7, 16, 42, 0), (2014, 3, 7, 17, 23, 0),
                   (2014, 3, 7, 18, 28, 15), (2014, 3, 7, 19, 41, 5),
                   (2014, 3, 7, 20, 41, 5), (2014, 3, 7, 21, 41, 25),
                   (2014, 3, 7, 22, 41, 25), (2014, 3, 8, 0, 19, 35)]

    r_trend = r_flight.filter_by_list(
        [datetime(*item) for item in trend_times])
    r_interp = r_flight.interpolate(r_trend, T0['off-radar'])

    if action == 'polygon':
        with open(config, 'rt') as cfg_file:
            cfg = load(cfg_file)
        from shapefile import Writer, POLYGON
        shp_file = Writer(POLYGON)
        shp_file.field('TIME', 'C', '20')
        for contour in cfg['times']:
            contour_pts = []
            ind_a = 0
            ind_b = 1
            contour_time = datetime(*contour['time'])
            if contour_time > r_interp.data[-1][0]:
                contour_time = r_interp.data[-1][0]
            for offset in cfg['time_delta']:
                time_offset = timedelta(minutes=offset)
                cur_time = contour_time + time_offset
                lat_start = contour['lat_bounds'][ind_a]
                lat_stop = contour['lat_bounds'][ind_b]
                contour_pts += [
                    r_interp.find_loc(cur_time, lat)
                    for lat in frange(lat_start, lat_stop,
                                      cfg['lat_step'] * (ind_b - ind_a))
                ]
                ind_a, ind_b = ind_b, ind_a
            shp_file.poly(parts=[contour_pts])
            poly_name = '{}_{}_{}'.format(*contour['time'][2:5])
            shp_file.record(poly_name)
        shp_file.save(cfg['save_to_file'])
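
The polygon-export pattern above boils down to a handful of pyshp calls. A minimal standalone sketch of the same pyshp 1.x idiom (the coordinates, record value, and output name are made up for illustration):

from shapefile import Writer, POLYGON

w = Writer(POLYGON)
w.field('TIME', 'C', '20')                  # one character attribute column
ring = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0], [0.0, 0.0]]
w.poly(parts=[ring])                        # one polygon with a single ring
w.record('7_16_42')                         # one record per shape
w.save('example_polygon')                   # writes .shp, .shx and .dbf
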
Example #2
    def extract_catchments(
        self,
        source,
        destination,
        flowlinefile,
        verbose=True,
    ):
        """
        Extracts the catchments from the source data file to the destination
        using the list of comids for the query.
        """

        # make a list of the comids

        comids = self.get_comids(flowlinefile)

        # open the catchment shapefile

        if verbose: print('reading the catchment shapefile\n')

        shapefile = Reader(source)

        # get the index of the feature id, which links to the flowline comid

        featureid_index = shapefile.fields.index(['FEATUREID', 'N', 9, 0]) - 1

        # go through the comids from the flowlines and add the corresponding
        # catchment to the catchment list

        if verbose: print('searching the catchments in the watershed\n')

        records = shapefile.records()
        indices = []

        for i, record in enumerate(records):
            if record[featureid_index] in comids: indices.append(i)

        if len(indices) == 0:
            raise RuntimeError('query returned no values')

        # create the new shapefile

        if verbose: print('writing the new catchment shapefile\n')

        w = Writer()

        for field in shapefile.fields:
            w.field(*field)

        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType=5, parts=[shape.points])
            w.record(*records[i])

        w.save(destination)
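
A note on the recurring "fields.index([...]) - 1" idiom in these examples: pyshp's Reader.fields starts with the DBF DeletionFlag entry, which records() omits, so the attribute index must be shifted down by one. A small hedged sketch (the field layout is illustrative):

fields = [('DeletionFlag', 'C', 1, 0),      # always first in Reader.fields
          ['FEATUREID', 'N', 9, 0],
          ['GRIDCODE', 'N', 9, 0]]
featureid_index = fields.index(['FEATUREID', 'N', 9, 0]) - 1   # -> 0
# a record looks like [featureid_value, gridcode_value], so
# record[featureid_index] is the FEATUREID value
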
Example #3
def merge_shapes(
    inputfile,
    outputfile=None,
    overwrite=False,
    verbose=True,
    vverbose=False,
):
    """
    Merges all the shapes in a shapefile into a single shape.
    """

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose:
            print('combined watershed shapefile {} exists'.format(outputfile))
        return

    if verbose:
        print('combining shapes from {}\n'.format(inputfile) +
              'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType=5)

    try:

        combined = combine_shapes(r.shapes(), verbose=vverbose)

    except:

        print('error: unable to combine shapes')
        raise

    # create the new file with the merged shapes

    w = Writer(shapeType=5)

    w.poly(shapeType=5, parts=[combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields:
        w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose:

        its = inputfile, outputfile
        print('successfully combined shapes from {} to {}\n'.format(*its))
Example #4
    def extract_flowlines(self, source, destination, HUC8, verbose = True):
        """Extracts flowlines from the source datafile to the destination using
        the HUC8 for the query."""

        # open the flowline file
    
        if verbose: print('reading the flowline file\n')
    
        shapefile = Reader(source, shapeType = 3)
        records   = shapefile.records()
    
        # figure out which field codes are the Reach code and comid
    
        reach_index = shapefile.fields.index(['REACHCODE', 'C', 14, 0]) - 1
    
        # go through the reach indices and add them to the list of flowlines
        # if in the watershed; also make a list of the corresponding comids
    
        if verbose: print('searching for flowlines in the watershed\n')
    
        indices = []

        for i, record in enumerate(records):
            if record[reach_index][:8] == HUC8: indices.append(i)

        if len(indices) == 0:
            raise RuntimeError('query returned no values')
    
        # write the data from the HUC8 to a new shapefile
    
        w = Writer(shapeType = 3)
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 3, parts = [shape.points])
    
            record = records[i]
    
            # little work around for blank GNIS_ID and GNIS_NAME values
    
            if isinstance(record[3], bytes):
                record[3] = record[3].decode('utf-8')
            if isinstance(record[4], bytes):
                record[4] = record[4].decode('utf-8')
    
            w.record(*record)
    
        w.save(destination)
    
        if verbose: 
            l = len(indices)
            print('queried {} flowlines from original shapefile\n'.format(l))
Example #5
    def extract_catchments(self, 
                           source, 
                           destination, 
                           flowlinefile, 
                           verbose = True,
                           ):
        """
        Extracts the catchments from the source data file to the destination
        using the list of comids for the query.
        """

        # make a list of the comids

        comids = self.get_comids(flowlinefile)

        # open the catchment shapefile
    
        if verbose: print('reading the catchment shapefile\n')
    
        shapefile = Reader(source)
    
        # get the index of the feature id, which links to the flowline comid
    
        featureid_index = shapefile.fields.index(['FEATUREID', 'N', 9, 0]) - 1
    
        # go through the comids from the flowlines and add the corresponding 
        # catchment to the catchment list
    
        if verbose: print('searching the catchments in the watershed\n')
    
        records = shapefile.records()
        indices = []
    
        for i, record in enumerate(records):
            if record[featureid_index] in comids: indices.append(i)

        if len(indices) == 0:
            raise RuntimeError('query returned no values')

        # create the new shapefile
    
        if verbose: print('writing the new catchment shapefile\n')
        
        w = Writer()
    
        for field in shapefile.fields:  w.field(*field)
    
        for i in indices:
            shape = shapefile.shape(i)
            w.poly(shapeType = 5, parts = [shape.points])
            w.record(*records[i])
    
        w.save(destination)
Example #6
def merge_shapes(inputfile, 
                 outputfile = None, 
                 overwrite = False, 
                 verbose = True, 
                 vverbose = False,
                 ):
    """
    Merges all the shapes in a shapefile into a single shape.
    """

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: 
            print('combined watershed shapefile {} exists'.format(outputfile))
        return
   
    if verbose: print('combining shapes from {}\n'.format(inputfile) + 
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)

    try: 

        combined = combine_shapes(r.shapes(), verbose = vverbose)

    except:

        print('error: unable to combine shapes')
        raise

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)

    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose: 

        its = inputfile, outputfile
        print('successfully combined shapes from {} to {}\n'.format(*its))
Example #7
def clip_value(in_file, ot_dir, min_height, max_height):
    """
    オンライン学習4 ベクタデータのフィルタリング
    
    浸水・土砂崩れベクタデータをGISデータの属性値(値)を使用してフィルタリングするプログラムを実行します。
    
    関数  : clip_value
    引数1 : 浸水・土砂崩れベクタデータ(*.shp)
    引数2 : 出力ディレクトリ名
    引数3 : 出力対象となる値の最小値
    引数4 : 出力対象となる値の最大値
    
    """
    # Get actual file path
    in_file = path.join(DATA_PATH_BASE, in_file)
    ot_dir = path.join(DATA_PATH_BASE, ot_dir)
    makedirs(ot_dir, exist_ok=True)

    ot_file = path.join(
        ot_dir, "{0}v.tif".format(path.splitext(path.basename(in_file))[0]))

    reader = ShpReader(in_file, encoding='cp932')
    writer = ShpWriter(ot_file, encoding='cp932')

    # Create DBF schema
    height_col_id = None
    data_fields = (col for col in reader.fields if col[0] != "DeletionFlag")
    for i, col in enumerate(data_fields):
        writer.field(col[0], col[1], col[2], col[3])
        if col[0] == "height":
            height_col_id = i

    if height_col_id is None:
        print("height column not found in polygon shapefile")
        return

    # Filtering
    n_mesh = reader.numRecords
    cnt_mesh = 0
    for data in reader.iterShapeRecords():
        height = data.record[height_col_id]
        if (height is not None) and (min_height <= height <= max_height):
            # This polygon is output target.
            writer.shape(data.shape)
            writer.record(*data.record)

        cnt_mesh = cnt_mesh + 1
        if cnt_mesh % 100000 == 0:
            print("{0}K / {1}K".format(cnt_mesh / 1000, n_mesh / 1000))

    writer.close()
Example #8
def main(shp):
    shp, _ = os.path.splitext(shp)
    with IO() as shpio, IO() as dbfio:  # Don't overwrite existing .shp, .dbf
        with Reader(shp) as r, Writer(shp=shpio, dbf=dbfio, shx=shp+'.shx') as w:
            w.fields = r.fields[1:]  # skip first deletion field
            for rec in r.iterShapeRecords():
                w.record(*rec.record)
                w.shape(rec.shape)
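
This snippet relies on names that are not shown; a plausible preamble, assuming IO is io.BytesIO and Reader/Writer come from pyshp 2.x:

import os
from io import BytesIO as IO
from shapefile import Reader, Writer

# main('roads.shp') would then rebuild only the .shx index on disk, while
# the .shp and .dbf bytes go to throwaway in-memory buffers.
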
Example #9
def trim_shapefile(
    in_path: Union[Path, str],
    join_on: str,
    include: list,
    out_path: Union[Path, str, None] = None,
) -> Union[Path, str]:
    """Trims a shapefile to only include shapes that match the given criteria.

    Shapes will be discarded unless their 'join_on' property is contained in the
    'include' list.
    """

    # Resolve the shapefile path (allows in_path to point to directory with same
    # name as nested shapefile)
    in_path = resolve_shapefile_path(in_path)

    # Construct new name if it was not provided
    if out_path is None:
        out_path = in_path.with_name(f"{in_path.name}_trimmed{in_path.suffix}")

    with Reader(str(in_path)) as r, Writer(str(out_path)) as w:

        w.fields = r.fields[1:]  # don't copy deletion field

        if join_on not in [f[0] for f in w.fields]:
            raise ValueError(f"'join_on'={join_on} not in shapefile fields: {w.fields}")

        # Copy features if they match the criteria
        for feature in r.iterShapeRecords():
            if feature.record[join_on] in include:
                w.record(*feature.record)
                w.shape(feature.shape)

    # PyShp doesn't manage .prj file, must copy manually.
    in_prj = in_path.with_suffix(".prj")
    if in_prj.exists():
        out_prj = out_path.with_suffix(".prj")
        shutil.copy(in_prj, out_prj)

    return out_path
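
A hypothetical call, with made-up paths and field name, to show the intent:

# keep only the features whose STATEFP value is in the include list
trimmed = trim_shapefile('counties', join_on='STATEFP', include=['06', '41'])
print(trimmed)   # path of the new *_trimmed shapefile returned above
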
Example #10
    def extract_HUC8(
        self,
        HUC8,
        output,
        gagefile='gagestations',
        verbose=True,
    ):
        """
        Extracts the USGS gage stations for a watershed from the gage 
        station shapefile into a shapefile for the 8-digit hydrologic unit 
        code of interest. 
        """

        # make sure the metadata exist locally

        self.download_metadata()

        # make sure the output destination exists

        if not os.path.isdir(output): os.mkdir(output)

        sfile = '{}/{}'.format(output, gagefile)
        if not os.path.isfile(sfile + '.shp'):

            # copy the projection

            shutil.copy(self.NWIS + '.prj', sfile + '.prj')

            # read the file

            gagereader = Reader(self.NWIS, shapeType=1)
            gagerecords = gagereader.records()

            # pull out the HUC8 record to parse the dataset

            HUC8_index = gagereader.fields.index(['HUC', 'C', 8, 0]) - 1

            # iterate through the field and find gages in the watershed

            its = HUC8, sfile
            print('extracting gage stations in {} to {}\n'.format(*its))

            gage_indices = []

            for i, record in enumerate(gagerecords):
                if record[HUC8_index] == HUC8: gage_indices.append(i)

            # write the data from the HUC8 to a new shapefile

            w = Writer(shapeType=1)

            for field in gagereader.fields:
                w.field(*field)

            for i in gage_indices:
                point = gagereader.shape(i).points[0]
                w.point(*point)
                w.record(*gagerecords[i])

            w.save(sfile)

            if verbose:

                print('successfully extracted NWIS gage stations\n')

        elif verbose:

            print('gage station file {} exists\n'.format(sfile))

        self.set_metadata(sfile)
Example #11
def merge_shapes(inputfile, outputfile = None, overwrite = False, 
                 verbose = True, vverbose = False):
    """Merges all the shapes in a shapefile into a single shape."""

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: print('combined watershed shapefile %s exists' % outputfile)
        return
   
    if verbose: print('combining shapes from {}\n'.format(inputfile) + 
                      'this may take a while...\n')

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)
    n = len(r.records())

    try: 
        shapes  = []
        records = [] 
        bboxes  = []

        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

        try:
            combined = combine_shapes(shapes, bboxes, verbose = vverbose)
        except:
            if verbose: print('trying alternate trace method')
            combined = combine_shapes(shapes, bboxes, skip = True,
                                      verbose = vverbose)

    except:
        if verbose: print('trying alternate trace method')
        shapes  = []
        records = [] 
        bboxes  = []
        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points, omit = True)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

        try:    combined = combine_shapes(shapes, bboxes, verbose = vverbose)
        except: 
            if verbose: print('trying alternate trace method')
            combined = combine_shapes(shapes, bboxes, skip = True,
                                      verbose = vverbose)

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)

    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose: 
        print('successfully combined shapes from %s to %s\n' % 
              (inputfile, outputfile))
Example #12
    def extract_HUC8(self, HUC8, output, gagefile = 'gagestations', 
                     verbose = True):
        """Extracts the USGS gage stations for a watershed from the gage 
        station shapefile into a shapefile for the 8-digit hydrologic unit 
        code of interest. 
        """

        # make sure the metadata exist locally

        self.download_metadata()

        # make sure the output destination exists

        if not os.path.isdir(output): os.mkdir(output)

        sfile = '{}/{}'.format(output, gagefile)
        if not os.path.isfile(sfile + '.shp'):

            # copy the projection

            shutil.copy(self.NWIS + '.prj', sfile + '.prj')

            # read the file

            gagereader  = Reader(self.NWIS, shapeType = 1)
            gagerecords = gagereader.records()

            # pull out the HUC8 record to parse the dataset

            HUC8_index  = gagereader.fields.index(['HUC',  'C', 8, 0]) - 1

            # iterate through the field and find gages in the watershed

            its = HUC8, sfile
            print('extracting gage stations in {} to {}\n'.format(*its))

            gage_indices = []

            for i, record in enumerate(gagerecords):
                if record[HUC8_index] == HUC8: gage_indices.append(i)

            # write the data from the HUC8 to a new shapefile

            w = Writer(shapeType = 1)

            for field in gagereader.fields:  w.field(*field)

            for i in gage_indices:
                point = gagereader.shape(i).points[0]
                w.point(*point)
                w.record(*gagerecords[i])

            w.save(sfile)

            if verbose: 
                print('successfully extracted NWIS gage stations\n')

        elif verbose: 

            print('gage station file {} exists\n'.format(sfile))

        self.set_metadata(sfile)
Example #13
def add_height_vector(in_polys, in_hpoint, dst_fn):
    """
    オンライン学習3 被害領域の抽出、ラスタベクタ変換

    メッシュデータに標高値を付与します。
    
    関数   : add_height_vector
    引数1  : 入力メッシュデータ名(.tif)
    引数2  : 数値標高モデル名(.shp)
    引数3 : 出力ファイル名(.shp)

    """

    # Read DEM data
    print("loading DEM data ...")
    dem = GridData()
    dem_reader = ShpReader(in_hpoint, encoding='cp932')

    n_p = dem_reader.numRecords
    cnt_p = 0
    for data in dem_reader.iterShapeRecords():
        point = Point(data.shape.points[0])
        p_val = data.record

        dem.add_data(point.x, point.y, p_val)
        cnt_p = cnt_p + 1
        if cnt_p % 100000 == 0:
            print("{0}K / {1}K".format(cnt_p/1000, n_p/1000))
    print("loaded DEM data .")
    print()

    # Process each polygon shapefile
    for in_poly in in_polys:
        print("processing {0} ...".format(in_poly))
        poly_reader = ShpReader(in_poly)
        poly_writer = ShpWriter(target=dst_fn)

        # Create DBF schema
        for col in poly_reader.fields:
            if col[0] != "DeletionFlag":
                poly_writer.field(col[0], col[1], col[2], col[3])
        poly_writer.field("height", "N", 18, 9)

        # Attach elevation value
        n_poly = poly_reader.numRecords
        cnt_poly = 0
        for data in poly_reader.iterShapeRecords():
            center = Polygon(data.shape.points).centroid
            key_x = dem.search_nearest_x(center.coords[0][0])
            key_y = dem.search_nearest_y(center.coords[0][1])
            dem_record = dem.get_data(key_x, key_y)
            if dem_record:
                # Nearest grid point has elevation value
                record = data.record + dem_record
            else:
                # Nearest grid point doesn't have elevation value
                record = data.record + [None]

            poly_writer.shape(data.shape)
            poly_writer.record(*record)

            cnt_poly = cnt_poly + 1
            if cnt_poly % 100000 == 0:
                print("{0}K / {1}K".format(cnt_poly/1000, n_poly/1000))

        poly_writer.close()

        print("processed {0} .".format(in_poly))
        print()
Example #14
    def extract_bbox(self, bbox, output, verbose=True):
        """Extracts the NID dam locations for a watershed from the dam 
        shapefile and the 8-digit hydrologic unit code of interest. 
        """

        self.download_compressed()

        xmin, ymin, xmax, ymax = bbox

        # copy the projection files

        if verbose: print('copying the projections from the NID source\n')

        projection = self.source + '.prj'

        shutil.copy(projection, output + '.prj')

        # get the dams within the watershed

        if verbose: print('reading the dam file\n')

        sf = Reader(self.source, shapeType=1)

        # work around for issues with pyshp

        damrecords = []
        for i in range(len(sf.shapes())):
            try:
                damrecords.append(sf.record(i))
            except:
                damrecords.append([-100 for i in range(len(sf.fields))])

        name_index = sf.fields.index(['DAM_NAME', 'C', 65, 0]) - 1
        nid_index = sf.fields.index(['NIDID', 'C', 7, 0]) - 1
        long_index = sf.fields.index(['LONGITUDE', 'N', 19, 11]) - 1
        lat_index = sf.fields.index(['LATITUDE', 'N', 19, 11]) - 1
        river_index = sf.fields.index(['RIVER', 'C', 65, 0]) - 1
        owner_index = sf.fields.index(['OWN_NAME', 'C', 65, 0]) - 1
        type_index = sf.fields.index(['DAM_TYPE', 'C', 10, 0]) - 1
        purp_index = sf.fields.index(['PURPOSES', 'C', 254, 0]) - 1
        year_index = sf.fields.index(['YR_COMPL', 'C', 10, 0]) - 1
        high_index = sf.fields.index(['NID_HEIGHT', 'N', 19, 11]) - 1
        mstor_index = sf.fields.index(['MAX_STOR', 'N', 19, 11]) - 1
        nstor_index = sf.fields.index(['NORMAL_STO', 'N', 19, 11]) - 1
        area_index = sf.fields.index(['SURF_AREA', 'N', 19, 11]) - 1

        # iterate through the fields and determine which points are in the box

        if verbose: print('extracting dams into new file\n')

        dam_indices = []

        for i, record in enumerate(damrecords):

            lat = record[lat_index]
            lon = record[long_index]

            if self.inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
                dam_indices.append(i)

        # write the data from the bbox to a new shapefile

        w = Writer(output, shapeType=1)

        for field in sf.fields:
            w.field(*field)

        for i in dam_indices:
            point = sf.shape(i).points[0]
            w.point(*point)

            values = damrecords[i]

            rs = []

            for value in values:

                if isinstance(value, bytes): value = value.decode('utf-8')
                rs.append(value)

            w.record(*rs)

        w.close()

        if verbose:

            print('successfully extracted NID dam locations to new file\n')
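
This variant uses the pyshp 2.x API: the target is passed to Writer and close() flushes the files, whereas the next example is the same routine in the 1.x idiom, where Writer is created bare and save() takes the target. A minimal 2.x sketch with illustrative values:

from shapefile import Writer

w = Writer('dams', shapeType=1)    # shapeType 1 = POINT
w.field('DAM_NAME', 'C', 65)
w.point(-100.0, 40.0)              # lon, lat
w.record('Example Dam')
w.close()                          # writes dams.shp / .shx / .dbf
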
Example #15
    def extract_bbox(self, bbox, output, verbose = True):
        """Extracts the NID dam locations for a watershed from the dam 
        shapefile and the 8-digit hydrologic unit code of interest. 
        """

        self.download_compressed()

        xmin, ymin, xmax, ymax = bbox

        # copy the projection files

        if verbose: print('copying the projections from the NID source\n')

        projection = self.source + '.prj'

        shutil.copy(projection, output + '.prj')

        # get the dams within the watershed

        if verbose: print('reading the dam file\n')

        sf = Reader(self.source, shapeType = 1)

        # work around for issues with pyshp

        damrecords   = []
        for i in range(len(sf.shapes())):
            try: damrecords.append(sf.record(i))
            except: damrecords.append([-100 for i in range(len(sf.fields))])

        name_index  = sf.fields.index(['DAM_NAME',   'C', 65,   0]) - 1
        nid_index   = sf.fields.index(['NIDID',      'C', 7,    0]) - 1
        long_index  = sf.fields.index(['LONGITUDE',  'N', 19,  11]) - 1
        lat_index   = sf.fields.index(['LATITUDE',   'N', 19,  11]) - 1
        river_index = sf.fields.index(['RIVER',      'C', 65,   0]) - 1
        owner_index = sf.fields.index(['OWN_NAME',   'C', 65,   0]) - 1
        type_index  = sf.fields.index(['DAM_TYPE',   'C', 10,   0]) - 1
        purp_index  = sf.fields.index(['PURPOSES',   'C', 254,  0]) - 1
        year_index  = sf.fields.index(['YR_COMPL',   'C', 10,   0]) - 1
        high_index  = sf.fields.index(['NID_HEIGHT', 'N', 19,  11]) - 1
        mstor_index = sf.fields.index(['MAX_STOR',   'N', 19,  11]) - 1
        nstor_index = sf.fields.index(['NORMAL_STO', 'N', 19,  11]) - 1
        area_index  = sf.fields.index(['SURF_AREA',  'N', 19,  11]) - 1

        # iterate through the fields and determine which points are in the box

        if verbose: print('extracting dams into new file\n')

        dam_indices = []

        for i, record in enumerate(damrecords):

            lat = record[lat_index]
            lon = record[long_index]

            if self.inside_box([xmin, ymin], [xmax, ymax], [lon, lat]):
                dam_indices.append(i)

        # write the data from the bbox to a new shapefile

        w = Writer(shapeType = 1)

        for field in sf.fields:  w.field(*field)

        for i in dam_indices:
            point = sf.shape(i).points[0]
            w.point(*point)

            values = damrecords[i]

            rs = []

            for value in values:

                if isinstance(value, bytes): value = value.decode('utf-8')
                rs.append(value)

            w.record(*rs)

        w.save(output)

        if verbose: 

            print('successfully extracted NID dam locations to new file\n')
Example #16
    def plot_gage_subbasin(self, hspfmodel, folder):
        """Makes a plot of the subbasin area."""

        subbasinfile = '{}/subbasins'.format(folder)
        boundaryfile = '{}/boundary'.format(folder)
        flowfile = '{}/flowlines'.format(folder)
        combinedfile = '{}/combined'.format(folder)
        watershedplot = '{}/watershed.png'.format(folder)

        # make a shapefile of the subbasins for the watershed

        f = '{0}/{1}/{1}subbasins'.format(self.directory, self.HUC8)
        for out in (subbasinfile, boundaryfile, flowfile, combinedfile):
            if not os.path.isfile(out + '.prj'):
                shutil.copy(f + '.prj', out + '.prj')

        if not os.path.isfile(subbasinfile + '.shp'):

            subshapes = []
            subrecords = []
            for subbasin in hspfmodel.subbasins:

                f = '{0}/{1}/{2}/combined'.format(self.directory, self.HUC8,
                                                  subbasin)
                s = Reader(f, shapeType=5)

                subshapes.append(s.shape(0).points)
                subrecords.append(s.record(0))

            w = Writer(shapeType=5)

            for field in s.fields:
                w.field(*field)
            for record in subrecords:
                w.record(*record)
            for shape in subshapes:
                w.poly(shapeType=5, parts=[shape])

            w.save(subbasinfile)

        if not os.path.isfile(combinedfile + '.shp'):

            fshapes = []
            frecords = []
            for subbasin in hspfmodel.subbasins:
                f = '{0}/{1}/{2}/combined_flowline'.format(
                    self.directory, self.HUC8, subbasin)
                r = Reader(f, shapeType=3)

                fshapes.append(r.shape(0).points)
                frecords.append(r.record(0))

            w = Writer(shapeType=3)

            for field in r.fields:
                w.field(*field)
            for record in frecords:
                w.record(*record)
            for shape in fshapes:
                w.poly(shapeType=3, parts=[shape])

            w.save(combinedfile)

        # merge the shapes into a watershed

        if not os.path.exists(boundaryfile + '.shp'):

            merge_shapes(subbasinfile, outputfile=boundaryfile)

        # make a flowline file for the subbasins for the watershed

        if not os.path.isfile(flowfile + '.shp'):

            shapes = []
            records = []
            for subbasin in hspfmodel.subbasins:
                f = '{0}/{1}/{2}/flowlines'.format(self.directory, self.HUC8,
                                                   subbasin)
                r = Reader(f, shapeType=3)
                for shape in r.shapes():
                    shapes.append(shape.points)
                for record in r.records():
                    records.append(record)

            w = Writer(shapeType=3)

            for field in r.fields:
                w.field(*field)
            for record in records:
                w.record(*record)
            for shape in shapes:
                w.poly(shapeType=3, parts=[shape])

            w.save(flowfile)

        if not os.path.isfile(watershedplot):

            plot_gage_subbasin(folder,
                               self.HUC8,
                               self.gageid,
                               hspfmodel,
                               output=watershedplot)
Example #17
def makeSHP(dic):
    shpname = saveSHP()
    shpWriter = Writer()
    shpWriter.autoBalance = 1

    shpWriter.field(headerEntry.get(), 'C', '255')
    shpWriter.field('Longitude', 'F')
    shpWriter.field('Latitude', 'F')
    
    geomtype = 1
    shpWriter.shapeType = geomtype

    parsedGeometryList = []
    dicKeyList = []
    for k in dic.keys():
        parsedGeometryList.append(dic[k])
        dicKeyList.append((k, dic[k][0], dic[k][1]))

    for parsedGeometry in parsedGeometryList:
        shpWriter.point(*parsedGeometry)

    for dList in dicKeyList:
        shpWriter.record(*dList)

    shpWriter.save(shpname) 
    prj = generatePRJ(int(sridEntry.get()))
    if prj is not None:
        prjfile = shpname.replace('.shp', '') + '.prj'
        with open(prjfile, 'w') as prjfileOpen:
            prjfileOpen.write(prj)
    return shpname
Example #18
def trans_vector(in_file, ot_dir, output_flg, dem_path, flood_flg):
    """
    オンライン学習3 被害領域の抽出、ラスタベクタ変換

    二値画像からポリゴンを生成します
    
    関数   : trans_vector
    引数1  : 入力ファイル名(.tif)
    引数2  : 出力ディレクトリ名
    引数3 : 出力フラグ(0:被災領域、1:非被災領域)
    引数4 : 数値標高モデル名(.shp)
    引数5 : 災害フラグ(True:浸水、False:土砂崩れ)

    """

    # Get destination file name
    filename = path.splitext(path.basename(in_file))[0]
    if filename.lower().startswith("sendai"):
        basename = "Sendai"
    elif filename.lower().startswith("kumamoto"):
        basename = "Kumamoto"
    else:
        basename = filename

    # Get actual file path
    in_file = path.join(DATA_PATH_BASE, in_file)
    ot_dir = path.join(DATA_PATH_BASE, ot_dir)
    dem_path = path.join(DATA_PATH_BASE, dem_path)
    makedirs(ot_dir, exist_ok=True)

    print("creating shapefile ...")

    # Create shapefile information of output area

    fn_tmp = path.join(ot_dir, "tmp.shp")
    writer = ShpWriter(target=fn_tmp, shapeType=POLYGON)
    writer.field("id", "C", "20", 0)
    writer.field("type", "C", "10", 0)
    writer.field("format", "C", "10", 0)
    writer.field("dis_tf", "C", "8", 0)
    writer.field("dis_tt", "C", "8", 0)
    writer.field("proc", "C", "8", 0)
    writer.field("pre_dn", "C", "10", 0)
    writer.field("pre_st", "C", "10", 0)
    writer.field("post_dn", "C", "10", 0)
    writer.field("post_st", "C", "10", 0)
    """
    オンライン学習3 被害領域の抽出、ラスタベクタ変換
    
    ポリゴンに付与する属性情報を定義するプログラムを実行します
    
    関数   : get_flood_record
    関数   : get_land_slide_record

    """
    if flood_flg:
        # flood processing
        record = get_flood_record()
    else:
        # landslide processing
        record = get_land_slide_record()

    # Read binary image and get coordinate information
    bin_img = imread(in_file)  # renamed from "bin" to avoid shadowing the built-in
    rect_tiff = RectGeoTiffCoordinateCalculator(in_file)

    # Create rectangle polygon of output area and output to shapefile
    n_shape = bin_img.shape[0] * bin_img.shape[1]
    cnt = 0
    for x_index, y_index in itertools.product(range(bin_img.shape[1]),
                                              range(bin_img.shape[0])):
        """
        オンライン学習3 被害領域の抽出、ラスタベクタ変換
    
        二値画像の各ピクセル四隅座標(緯度、経度)を計算するプログラムを実行します
        
        関数   : create_polygon_points
        引数1  : 対象ピクセルのx番号
        引数2  : 対象ピクセルのy番号
        引数3 : 二値画像の図形情報(ファイル名、画像サイズ等)を持つインスタンス
    
        """
        points = create_polygon_points(x_index, y_index, rect_tiff)
        """
        オンライン学習3 被害領域の抽出、ラスタベクタ変換
    
        二値画像からメッシュを作成します
        
        bin[y_index, x_index] == 255):ピクセル値が255の場合
        output_flg == "0":被災領域のメッシュを作成
        output_flg == "1":非被災領域のメッシュを作成
    
        """
        if (bin_img[y_index, x_index] == 255) == (output_flg == "0"):
            # This pixel is output target.
            writer.poly([points])
            writer.record(*record)

        cnt = cnt + 1
        if cnt % 100000 == 0:
            print("{0}K / {1}K".format(cnt / 1000, n_shape / 1000))
    writer.close()
    print("created shapefile .")

    if output_flg == "0":
        fn_out = path.join(
            ot_dir, "{0}_{1}cbp.shp".format(basename,
                                            time.strftime("%Y%m%d%H%M%S")))
    else:
        fn_out = path.join(
            ot_dir, "{0}_{1}cbpr.shp".format(basename,
                                             time.strftime("%Y%m%d%H%M%S")))

    # Attach elevation value
    """
    オンライン学習3 被害領域の抽出、ラスタベクタ変換

    メッシュデータに標高値を付与するプログラムを実行します
    
    関数   : add_height_vector
    引数1  : 入力メッシュデータ名(.tif)
    引数2  : 数値標高モデル名(.shp)
    引数3 : 出力ファイル名(.shp)

    """
    add_height_vector([fn_tmp], dem_path, fn_out)

    if not DEV_FLAG:
        # Delete temporary file.
        remove("{0}.shp".format(path.splitext(fn_tmp)[0]))
        remove("{0}.shx".format(path.splitext(fn_tmp)[0]))
        remove("{0}.dbf".format(path.splitext(fn_tmp)[0]))
Example #19
def write(writer: shapefile.Writer, output):
    writer.saveShp(output)
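
saveShp here is the pyshp 1.x method that writes only the .shp component (alongside save, saveShx and saveDbf); pyshp 2.x dropped these in favor of a target given up front and close(). A hedged 2.x equivalent with made-up data:

import shapefile

with shapefile.Writer('points', shapeType=shapefile.POINT) as w:
    w.field('NAME', 'C', 40)
    w.point(139.69, 35.69)    # lon, lat
    w.record('Tokyo')
# leaving the with-block closes the writer and writes points.shp/.shx/.dbf
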
Example #20
	def export_data(self, query):
		
		def get_label(item):
			
			label = item.descriptor
			if label is None:
				return None
			return label.label
		
		def abbrev_to(name, chars, columns_abbrev):
			
			if len(name) > chars:
				n = 1
				while True:
					name_new = name[:chars - len(str(n))] + str(n)
					if not name_new in columns_abbrev.values():
						return name_new
					n += 1
			return name
		
		path = as_path(self.url, check_if_exists = False)
		if path is None:
			return
		
		geometries = []  # [[coords, geometry_type], ...]
		row_idxs = [] # [row_idx, ...]
		for row_idx, row in enumerate(query):
			for column in row:
				label = get_label(row[column])
				if label.__class__.__name__ == "DGeometry":
					geometries.append(label.coords)
					row_idxs.append(row_idx)
					break
		
		if not row_idxs:
			return
		
		columns_abbrev = {} # {column: column_abbrev, ...}; abbreviated column names
		for column in query.columns:
			column_abbrev = column
			if len(column_abbrev) > 10:
				if "." in column_abbrev:
					column_abbrev = column_abbrev.split(".")
					column_abbrev = "_".join([abbrev_to(column_abbrev[0], 4, columns_abbrev), abbrev_to(column_abbrev[1], 5, columns_abbrev)])
				else:
					column_abbrev = abbrev_to(column_abbrev, 10, columns_abbrev)
			column_abbrev = column_abbrev.replace(".", "_")
			columns_abbrev[column] = column_abbrev
		
		shapeType = -1
		shape_types = {
			"POINT": 1,
			"LINESTRING": 3,
			"POLYGON": 5,
			"MULTIPOINT": 8,
			"POINTZ": 11,
			"LINESTRINGZ": 13,
			"POLYGONZ": 15,
			"MULTIPOINTZ": 18,
			"POINTM": 21,
			"LINESTRINGM": 23,
			"POLYGONM": 25,
			"MULTIPOINTM": 28,
		}
		for _, geometry_type in geometries:
			if geometry_type not in shape_types:
				raise Exception("Unknown geometry type")
			if shapeType > -1:
				if shape_types[geometry_type] != shapeType:
					raise Exception("Geometries must be of the same type")
			else:
				shapeType = shape_types[geometry_type]
		
		sf = Writer(shapeType = shapeType)
		types = {} # {column: type, ...}
		shp_types = {bool: "C", int: "N", float: "N", str: "C"}
		conv_order = ["N", "C"]
		for row in query:
			for column in row:
				label = get_label(row[column])
				if label.__class__.__name__ != "DString":
					continue
				value = label.try_numeric
				typ = type(value)
				typ = shp_types[typ] if typ in shp_types else "C"
				if (not column in types) or ((typ != types[column]) and (conv_order.index(typ) > conv_order.index(types[column]))):
					types[column] = typ
		for column in types:
			sf.field(columns_abbrev[column], fieldType = types[column], size = "128")
		for i in range(len(geometries)):
			row = query[row_idxs[i]]
			coords = geometries[i][0]
			if shapeType in [1, 11, 21]: # point types
				sf.point(*coords[0], shapeType = shapeType)
			else:
				sf.poly(shapeType = shapeType, parts = [coords])
			if types:
				record = []
				for column in types:
					label = get_label(row[column])
					if label is not None:
						label = label.value
					record.append(label)
				sf.record(*record)
		sf.save(path)
Example #21
def combine_catchments(catchmentfile,
                       flowfile,
                       elevationfile,
                       comid,
                       output=None,
                       overwrite=False,
                       verbose=True):
    """Combines together all the catchments in a basin catchment shapefile.
    Creates a new shapefile called "combined" in the same directory as the 
    original file.  Uses the elevation data from the raster file and the flow
    data file to estimate the length and average slope of the overland flow 
    plane.
    """

    t0 = time.time()
    numpy.seterr(all='raise')

    if output is None: output = os.path.join(os.getcwd(), 'combined')

    if os.path.isfile(output + '.shp') and not overwrite:
        if verbose: print('combined catchment shapefile %s exists' % output)
        return

    if verbose: print('combining catchments from %s\n' % catchmentfile)

    # start by copying the projection files

    shutil.copy(catchmentfile + '.prj', output + '.prj')

    # load the catchment and flowline shapefiles

    c = Reader(catchmentfile, shapeType=5)
    f = Reader(flowfile, shapeType=3)

    # make lists of the comids and featureids

    featureid_index = c.fields.index(['FEATUREID', 'N', 9, 0]) - 1
    comid_index = f.fields.index(['COMID', 'N', 9, 0]) - 1

    featureids = [r[featureid_index] for r in c.records()]
    comids = [r[comid_index] for r in f.records()]

    # check that shapes are traceable--don't have multiple points and start
    # and end at the same place--then make an appropriate list of shapes
    # and records--note it's more memory efficient to read one at a time

    n = len(c.records())
    shapes = []
    records = []
    bboxes = []

    try:
        for i in range(n):
            catchment = c.shape(i)
            record = c.record(i)

            shape_list = format_shape(catchment.points)
            for s in shape_list:
                shapes.append(s)
                records.append(record)
                bboxes.append(catchment.bbox)

        try:
            combined = combine_shapes(shapes, bboxes, verbose=verbose)
        except:
            combined = combine_shapes(shapes,
                                      bboxes,
                                      skip=True,
                                      verbose=verbose)

    except:
        shapes = []
        records = []
        bboxes = []
        for i in range(n):
            catchment = c.shape(i)
            record = c.record(i)

            shape_list = format_shape(catchment.points, omit=True)
            for s in shape_list:
                shapes.append(s)
                records.append(record)
                bboxes.append(catchment.bbox)

        try:
            combined = combine_shapes(shapes, bboxes, verbose=verbose)
        except:
            combined = combine_shapes(shapes,
                                      bboxes,
                                      skip=True,
                                      verbose=verbose)

    # iterate through the catchments and get the elevation data from NED
    # then estimate the value of the overland flow plane length and slope

    lengths = numpy.empty((n), dtype='float')
    slopes = numpy.empty((n), dtype='float')

    for i in range(n):
        catchment = c.shape(i)
        flowline = f.shape(comids.index(featureids[i]))

        catchpoints = get_raster_on_poly(elevationfile,
                                         catchment.points,
                                         verbose=verbose)
        catchpoints = numpy.array([p for p in catchpoints])

        zs = get_raster(elevationfile, flowline.points)

        flowpoints = numpy.array([[p[0], p[1], z]
                                  for p, z in zip(flowline.points, zs)])

        # iterate through the raster values and find the closest flow point

        closest = numpy.empty((len(catchpoints), 3), dtype='float')

        for j, point in enumerate(catchpoints):
            closest[j] = flowpoints[numpy.linalg.norm(
                flowpoints[:, :2] - point[:2], axis=1).argmin()]

        # estimate the slope and overland flow plane length

        length, slope = get_overland_vector(catchpoints, closest)

        if verbose:
            print('avg slope and length =', slope.mean(), length.mean())

        lengths[i], slopes[i] = length.mean(), slope.mean()

    if verbose: print('\nfinished overland flow plane calculations\n')

    # get area of the subbasin from the catchment metadata

    areasq_index = c.fields.index(['AreaSqKM', 'N', 19, 6]) - 1
    areas = numpy.array([r[areasq_index] for r in c.records()])

    # take the area weighted average of the slopes and flow lengths

    tot_area = round(areas.sum(), 2)
    avg_length = round(1000 * numpy.sum(areas * lengths) / tot_area, 1)
    avg_slope = round(numpy.sum(areas * slopes) / tot_area, 4)

    # get the centroid and the average elevation

    combined = [[float(x), float(y)] for x, y in combined]
    centroid = get_centroid(numpy.array(combined))

    Cx, Cy = round(centroid[0], 4), round(centroid[1], 4)

    elev_matrix, origin = get_raster_in_poly(elevationfile,
                                             combined,
                                             verbose=verbose)

    elev_matrix = elev_matrix.flatten()
    elev_matrix = elev_matrix[elev_matrix.nonzero()]

    avg_elev = round(elev_matrix.mean() / 100., 2)

    # write the data to the shapefile

    w = Writer(shapeType=5)

    fields = [['ComID', 'N', 9, 0], ['PlaneLenM', 'N', 8, 2],
              ['PlaneSlope', 'N', 9, 6], ['AreaSqKm', 'N', 10, 2],
              ['CenX', 'N', 12, 6], ['CenY', 'N', 12, 6],
              ['AvgElevM', 'N', 8, 2]]

    record = [comid, avg_length, avg_slope, tot_area, Cx, Cy, avg_elev]

    for field in fields:
        w.field(*field)

    w.record(*record)

    w.poly(shapeType=5, parts=[combined])

    w.save(output)

    if verbose:
        print('\ncompleted catchment combination in %.1f seconds\n' %
              (time.time() - t0))
Example #22
def merge_shapes(inputfile, outputfile = None, overwrite = False, 
                 verbose = True, vverbose = False):
    """Merges all the shapes in a shapefile into a single shape."""

    if outputfile is None: outputfile = '{}/merged'.format(os.getcwd())

    if os.path.isfile(outputfile + '.shp') and not overwrite:
        if verbose: print('combined watershed shapefile %s exists' % outputfile)
        return
   
    if verbose: print('combining shapes from %s\n' % inputfile)

    # start by copying the projection files

    shutil.copy(inputfile + '.prj', outputfile + '.prj')

    # load the catchment and flowline shapefiles

    r = Reader(inputfile, shapeType = 5)
    n = len(r.records())

    try: 
        shapes  = []
        records = [] 
        bboxes  = []

        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

        try:
            combined = combine_shapes(shapes, bboxes, verbose = vverbose)
        except:
            if verbose: print('trying alternate trace method')
            combined = combine_shapes(shapes, bboxes, skip = True,
                                      verbose = vverbose)

    except:
        if verbose: print('trying alternate trace method')
        shapes  = []
        records = [] 
        bboxes  = []
        for i in range(n):
            shape = r.shape(i)
            record = r.record(i)

            shape_list = format_shape(shape.points, omit = True)

            for sh in shape_list:
                shapes.append(sh)
                records.append(record)
                bboxes.append(shape.bbox)

        try:    combined = combine_shapes(shapes, bboxes, verbose = vverbose)
        except: 
            if verbose: print('trying alternate trace method')
            combined = combine_shapes(shapes, bboxes, skip = True,
                                      verbose = vverbose)

    # create the new file with the merged shapes

    w = Writer(shapeType = 5)

    w.poly(shapeType = 5, parts = [combined])

    # copy the fields from the original and then the first record; note this
    # can be adapted as needed

    for field in r.fields: w.field(*field)
    w.record(*r.record(0))

    w.save(outputfile)

    if verbose: 
        print('successfully combined shapes from %s to %s\n' % 
              (inputfile, outputfile))
Example #23
def clip_shape(in_mesh, in_mask, ot_dir, flg_mask):
    """
    オンライン学習4 ベクタデータのフィルタリング
    
    浸水・土砂崩れベクタデータをGISデータの形状を利用してフィルタリングします。
    
    関数  : clip_shape
    引数1 : 浸水・土砂崩れベクタデータ(*.shp)
    引数2 : GISデータ(*.shp)
    引数3 : 出力ディレクトリ名
    引数4 : 出力フラグ(True:GISデータと重なる部分を出力、False:GISデータと重ならない部分を出力)
    
    """
    # Get actual file path
    in_mesh = path.join(DATA_PATH_BASE, in_mesh)
    in_mask = path.join(DATA_PATH_BASE, in_mask)
    ot_dir = path.join(DATA_PATH_BASE, ot_dir)
    makedirs(ot_dir, exist_ok=True)

    ot_file = path.join(ot_dir, "{0}s.tif".format(path.splitext(path.basename(in_mesh))[0]))

    reader_mesh = ShpReader(in_mesh, encoding='cp932')
    reader_mask = ShpReader(in_mask, encoding='cp932')
    writer = ShpWriter(ot_file, encoding='cp932')

    # Create DBF schema
    for col in reader_mesh.fields:
        if col[0] != "DeletionFlag":
            writer.field(col[0], col[1], col[2], col[3])

    # Create set of mask polygon
    maskdata = []
    for data in reader_mask.iterShapes():
        points = data.points
        points_split = list(data.parts) + [len(points)]

        poly_list = []
        for i in range(len(points_split) - 1):
            points_part = points[points_split[i]:points_split[i + 1]]
            poly_list.append(Polygon(points_part))

        # Use as mask polygon only when all key conditions are satisfied.
        # Memorize shape and bbox of polygon.
        xs = [p[0] for p in points]
        ys = [p[1] for p in points]
        x_range = min(xs), max(xs)
        y_range = min(ys), max(ys)
        maskdata.append((x_range, y_range, poly_list))

    # Filtering
    n_mesh = reader_mesh.numRecords
    cnt_mesh = 0
    for data in reader_mesh.iterShapeRecords():
        center = Polygon(data.shape.points).centroid
        x = center.x
        y = center.y

        masked = False
        for x_range, y_range, mask_polys in maskdata:
            # Primary screening by mask polygon bbox.
            if x < x_range[0] or x > x_range[1] or y < y_range[0] or y > y_range[1]:
                continue

            mask_count = sum(poly.contains(center) for poly in mask_polys)
            if mask_count % 2 == 1:
                masked = True
                break

        if masked == flg_mask:
            # This polygon is output target.
            writer.shape(data.shape)
            writer.record(*data.record)

        cnt_mesh = cnt_mesh + 1
        if cnt_mesh % 100000 == 0:
            print("{0}K / {1}K".format(cnt_mesh/1000, n_mesh/1000))

    writer.close()
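
The even-odd counting above is what lets a mask polygon with holes behave correctly: a centroid covered by the outer ring and by one hole ring is counted twice and treated as outside. A tiny sketch with made-up geometry (shapely, as in the example):

from shapely.geometry import Point, Polygon

outer = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
hole = Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])
center = Point(2, 2)

mask_count = sum(p.contains(center) for p in (outer, hole))
print(mask_count % 2 == 1)    # False: the point falls inside the hole
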
Example #24
def extract_aquifers(directory, HUC8, aquifers, pad=0.2, verbose=True):
    """Extracts aquifers from the source datafile to the destination using
    the HUC8 boundaries for the query."""

    start = time.time()

    # open up the HUC8 boundary shapefile and use it to get the bounding box

    shapefile = Reader(directory + '/%s/%scatchments' % (HUC8, HUC8))

    xmin, ymin, xmax, ymax = get_boundaries(shapefile.shapes())

    # convert to bounding corners for testing

    p1 = [xmin - pad * (xmax - xmin), ymin - pad * (ymax - ymin)]
    p2 = [xmax + pad * (xmax - xmin), ymax + pad * (ymax - ymin)]

    shapefile = None

    # start by copying the projection files

    if verbose: print('\ncopying the projections\n')

    shutil.copy(directory + '/%s/%scatchments.prj' % (HUC8, HUC8),
                directory + '/%s/%saquifers.prj' % (HUC8, HUC8))

    # open the flowline file

    if verbose: print('reading the aquifer file\n')

    shapefile = Reader(aquifers, shapeType=5)

    # work around for issues with pyshp

    records = []
    for i in range(len(shapefile.shapes())):
        try:
            records.append(shapefile.record(i))
        except:
            records.append('')

    # use the bounding boxes to see if the shapes are within the watershed area

    if verbose: print('searching for aquifers in the watershed\n')

    bboxes = [shapefile.shape(i).bbox for i in range(len(records))]

    corners = [[[b[0], b[1]], [b[0], b[3]], [b[2], b[1]], [b[2], b[3]]]
               for b in bboxes]

    indices = [
        i for i, c in zip(range(len(corners)), corners)
        if any([inside_box(p1, p2, p) for p in c])
        or all([inside_box(c[0], c[3], p1),
                inside_box(c[0], c[3], p2)])
    ]

    # remove any non aquifers

    indices = [i for i in indices if shapefile.record(i)[4] != 999]

    # find a record for the non aquifer

    i = 0
    while shapefile.record(i)[4] != 999:
        i += 1

    nonrecord = shapefile.record(i)
    nonrecord[1] = nonrecord[1].decode('utf-8')
    nonrecord[5] = 0
    nonrecord[6] = 0

    if len(indices) == 0:
        if verbose: print('query returned no values, returning\n')
        return

    # write the data from the HUC8 to a new shapefile

    w = Writer(directory + '/%s/%saquifers' % (HUC8, HUC8), shapeType=5)

    for field in shapefile.fields:
        w.field(*field)

    for i in indices:
        shape = shapefile.shape(i)

        # check for multiple parts

        if len(shape.parts) > 1:
            parts = [
                shape.points[i:j]
                for i, j in zip(shape.parts[:-1], shape.parts[1:])
            ]
        else:
            parts = [shape.points]

        record = records[i]

        # little work around for blank binary values

        if isinstance(record[1], bytes):
            record[1] = record[1].decode('utf-8')

        w.poly(shapeType=5, parts=parts)
        w.record(*record)

    # add a shape for the bounding box showing no aquifer locations

    part = [p1, [p1[0], p2[1]], p2, [p2[0], p1[1]]]

    w.poly(shapeType=5, parts=[part])
    w.record(*nonrecord)

    w.close()

    end = time.time()

    if verbose:
        print('successfully queried data in %.2f seconds\n' % (end - start))
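
inside_box is not shown in these examples; a plausible implementation, consistent with how it is called above (p1 the lower-left corner, p2 the upper-right):

def inside_box(p1, p2, p):
    """Return True if point p lies inside the box spanned by p1 and p2."""
    return p1[0] <= p[0] <= p2[0] and p1[1] <= p[1] <= p2[1]
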
Example #25
    def plot_gage_subbasin(self, hspfmodel, folder):
        """Makes a plot of the subbasin area."""

        subbasinfile  = '{}/subbasins'.format(folder)
        boundaryfile  = '{}/boundary'.format(folder)
        flowfile      = '{}/flowlines'.format(folder)
        combinedfile  = '{}/combined'.format(folder)
        watershedplot = '{}/watershed.png'.format(folder)

        # make a shapefile of the subbasins for the watershed

        f = '{0}/{1}/{1}subbasins'.format(self.directory, self.HUC8)
        for out in (subbasinfile, boundaryfile, flowfile, combinedfile):
            if not os.path.isfile(out + '.prj'):
                shutil.copy(f + '.prj', out + '.prj')

        if not os.path.isfile(subbasinfile + '.shp'):

            subshapes  = []
            subrecords = []
            for subbasin in hspfmodel.subbasins:

                f = '{0}/{1}/{2}/combined'.format(self.directory, self.HUC8, 
                                                  subbasin)
                s = Reader(f, shapeType = 5)

                subshapes.append(s.shape(0).points)
                subrecords.append(s.record(0))

            w = Writer(shapeType = 5)

            for field in s.fields:    w.field(*field)
            for record in subrecords: w.record(*record)
            for shape in subshapes:   w.poly(shapeType = 5, parts = [shape])

            w.save(subbasinfile)

        if not os.path.isfile(combinedfile + '.shp'):

            fshapes    = []
            frecords   = []
            for subbasin in hspfmodel.subbasins:
                f = '{0}/{1}/{2}/combined_flowline'.format(self.directory, 
                                                           self.HUC8, 
                                                           subbasin)
                r = Reader(f, shapeType = 3)

                fshapes.append(r.shape(0).points)
                frecords.append(r.record(0))

            w = Writer(shapeType = 3)

            for field in r.fields:  w.field(*field)
            for record in frecords: w.record(*record)
            for shape in fshapes:   w.poly(shapeType = 3, parts = [shape])

            w.save(combinedfile)

        # merge the shapes into a watershed

        if not os.path.exists(boundaryfile + '.shp'):

            merge_shapes(subbasinfile, outputfile = boundaryfile)

        # make a flowline file for the subbasins for the watershed

        if not os.path.isfile(flowfile + '.shp'):

            shapes  = []
            records = []
            for subbasin in hspfmodel.subbasins:
                f = '{0}/{1}/{2}/flowlines'.format(self.directory, 
                                                   self.HUC8, subbasin)
                r = Reader(f, shapeType = 3)
                for shape  in r.shapes():  shapes.append(shape.points)
                for record in r.records(): records.append(record)

            w = Writer(shapeType = 3)

            for field in r.fields: w.field(*field)
            for record in records: w.record(*record)
            for shape in shapes:   w.poly(shapeType = 3, parts = [shape])

            w.save(flowfile)

        if not os.path.isfile(watershedplot):

            plot_gage_subbasin(folder, self.HUC8, self.gageid, hspfmodel,
                               output = watershedplot)
Example #26
def extract_aquifers(directory, HUC8, aquifers, pad = 0.2, verbose = True):
    """Extracts aquifers from the source datafile to the destination using
    the HUC8 boundaries for the query."""

    start = time.time()

    # open up the HUC8 boundary shapefile and use it to get the bounding box

    shapefile = Reader(directory + '/%s/%scatchments' % (HUC8, HUC8))

    xmin, ymin, xmax, ymax = get_boundaries(shapefile.shapes())

    # convert to bounding corners for testing

    p1 = [xmin - pad * (xmax - xmin), ymin - pad * (ymax - ymin)]
    p2 = [xmax + pad * (xmax - xmin), ymax + pad * (ymax - ymin)]

    shapefile = None

    # start by copying the projection files

    if verbose: print('\ncopying the projections\n')

    shutil.copy(directory + '/%s/%scatchments.prj' % (HUC8, HUC8), 
                directory + '/%s/%saquifers.prj' % (HUC8, HUC8))

    # open the flowline file
    
    if verbose: print('reading the aquifer file\n')
    
    shapefile = Reader(aquifers, shapeType = 5)

    # work around for issues with pyshp

    records   = []
    for i in range(len(shapefile.shapes())):
        try: records.append(shapefile.record(i))
        except: records.append('')
     
    # use the bounding boxes to see if the shapes are within the watershed area

    if verbose: print('searching for aquifers in the watershed\n')

    bboxes = [shapefile.shape(i).bbox for i in range(len(records))]

    corners = [[[b[0], b[1]], [b[0], b[3]], [b[2], b[1]], [b[2], b[3]]]
               for b in bboxes]

    indices = [i for i, c in zip(range(len(corners)), corners) if 
               any([inside_box(p1, p2, p) for p in c]) or 
               all([inside_box(c[0], c[3], p1), inside_box(c[0], c[3], p2)])]

    # remove any non aquifers

    indices = [i for i in indices if shapefile.record(i)[4] != 999]

    # find a record for the non aquifer

    i = 0
    while shapefile.record(i)[4] != 999: i += 1

    nonrecord = shapefile.record(i)
    nonrecord[1] = nonrecord[1].decode('utf-8')
    nonrecord[5] = 0
    nonrecord[6] = 0

    if len(indices) == 0:
        if verbose: print('query returned no values, returning\n')
        return
    
    # write the data from the HUC8 to a new shapefile
    
    w = Writer(shapeType = 5)
    
    for field in shapefile.fields:  w.field(*field)
    
    for i in indices:
        shape = shapefile.shape(i)

        # check for multiple parts

        if len(shape.parts) > 1:
            parts = [shape.points[i:j] 
                     for i, j in zip(shape.parts[:-1], shape.parts[1:])]
        else: parts = [shape.points]

        record = records[i]
    
        # little work around for blank binary values
    
        if isinstance(record[1], bytes):
            record[1] = record[1].decode('utf-8')

        w.poly(shapeType = 5, parts = parts)
        w.record(*record)

    # add a shape for the bounding box showing no aquifer locations

    part = [p1, [p1[0], p2[1]], p2, [p2[0], p1[1]]]

    w.poly(shapeType = 5, parts = [part])
    w.record(*nonrecord)
    
    w.save(directory + '/%s/%saquifers' % (HUC8, HUC8))
    
    end = time.time()
    
    if verbose: 
        print('successfully queried data in %.2f seconds\n' % (end - start))