Example #1
    def iterfeatures(self, na='null', show_bbox=False):
        """
        Returns an iterator that yields feature dictionaries that comply with
        __geo_interface__

        Parameters
        ----------
        na : {'null', 'drop', 'keep'}, default 'null'
            Indicates how to output missing (NaN) values in the GeoDataFrame
            * null: output the missing entries as JSON null
            * drop: remove the property from the feature. This applies to
                    each feature individually so that features may have
                    different properties
            * keep: output the missing entries as NaN

        show_bbox : bool, default False
            Include bbox (bounds) in the geojson output.
        """
        if na not in ['null', 'drop', 'keep']:
            raise ValueError('Unknown na method {0}'.format(na))

        ids = np.array(self.index, copy=False)
        geometries = np.array(self[self._geometry_column_name], copy=False)

        properties_cols = self.columns.difference([self._geometry_column_name])

        if len(properties_cols) > 0:
            # convert to object to get python scalars.
            properties = self[properties_cols].astype(object).values
            if na == 'null':
                properties[pd.isnull(self[properties_cols]).values] = None

            for i, row in enumerate(properties):
                geom = geometries[i]

                if na == 'drop':
                    properties_items = dict((k, v) for k, v
                                            in zip(properties_cols, row)
                                            if not pd.isnull(v))
                else:
                    properties_items = dict((k, v) for k, v
                                            in zip(properties_cols, row))

                feature = {'id': str(ids[i]),
                           'type': 'Feature',
                           'properties': properties_items,
                           'geometry': mapping(geom) if geom else None}

                if show_bbox:
                    feature['bbox'] = geom.bounds if geom else None
                yield feature

        else:
            for fid, geom in zip(ids, geometries):
                feature = {'id': str(fid),
                           'type': 'Feature',
                           'properties': {},
                           'geometry': mapping(geom) if geom else None}
                if show_bbox:
                        feature['bbox'] = geom.bounds if geom else None
                yield feature
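A minimal usage sketch for the iterator above (the two-row GeoDataFrame is made up; assumes geopandas and shapely are installed):

import geopandas as gpd
from shapely.geometry import Point

gdf = gpd.GeoDataFrame({'name': ['a', None]},
                       geometry=[Point(0, 0), Point(1, 1)])
for feat in gdf.iterfeatures(na='drop', show_bbox=True):
    # the second feature carries no 'name' property because na='drop'
    print(feat['id'], feat['properties'], feat['bbox'])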
Example #2
def main(infile, outfile, driver):

    with fio.open(infile) as src:
        meta = src.meta
        meta['driver'] = driver

    with fio.open(infile) as src, fio.open(outfile, 'w', **meta) as dst:
        with click.progressbar(src) as features:
            for feat in features:

                east = deepcopy(feat)
                west = deepcopy(feat)

                east_geom = shape(east['geometry'])
                west_geom = shape(west['geometry'])

                # if 'Point' not in asShape(feat['geometry']).type:
                #     east_geom = east_geom.simplify(0.0001).buffer(0)
                #     west_geom = west_geom.simplify(0.0001).buffer(0)

                east_geom = translate(east_geom, xoff=180)
                west_geom = translate(west_geom, xoff=-180)

                if not east_geom.is_empty:
                    east['geometry'] = mapping(east_geom)
                    dst.write(east)

                if not west_geom.is_empty:
                    west['geometry'] = mapping(west_geom)
                    dst.write(west)
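For reference, a standalone check of the shapely.affinity.translate call the loop relies on (the point is made up):

from shapely.affinity import translate
from shapely.geometry import Point

p = Point(170, 10)
print(translate(p, xoff=180).x)   # 350.0 -> the "east" copy
print(translate(p, xoff=-180).x)  # -10.0 -> the "west" copy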
Example #3
def export_endpoints(shapefile, cross_sections, driver=None, epsg=None):
    # crs from epsg code
    if epsg is not None:
        crs = from_epsg(epsg)
    else:
        crs = None

    # schema
    schema = {'geometry': 'Point', 'properties': {'label': 'str'}}

    # shapefile write arguments
    shape_kwargs = {
        'driver': driver,
        'schema': schema,
        'crs': crs,
        }
    log.info('writing to {f:}'.format(f=os.path.basename(shapefile)))
    with fiona.open(shapefile, 'w', **shape_kwargs) as dst:
        for cs in cross_sections:
            startpoint = Point(cs.shape.coords[0])
            endpoint = Point(cs.shape.coords[-1])
            dst.write({
                'geometry': mapping(startpoint),
                'properties': {'label': cs.label}
                })
            dst.write({
                'geometry': mapping(endpoint),
                'properties': {'label': cs.label + '`'}
                })
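A hypothetical invocation; the namedtuple is a stand-in for whatever cross-section object the caller uses, provided it exposes `shape` and `label` attributes (the EPSG code is arbitrary, and the module-level `log` and imports used above are assumed):

from collections import namedtuple
from shapely.geometry import LineString

CrossSection = namedtuple('CrossSection', ['shape', 'label'])
sections = [CrossSection(LineString([(0, 0), (100, 0)]), 'A')]
export_endpoints('endpoints.shp', sections, driver='ESRI Shapefile', epsg=28992)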
Example #4
    def apply_shapely(self, method, args=None, call=True, out_geomtype=None,
                      **kwargs):
        coll = self.collection()
        out_schema = coll.schema.copy()
        if not args:
            args = []
        if out_geomtype:
            out_schema['geometry'] = out_geomtype

        tempds = self.tempds(method)
        with fiona.collection(tempds, "w", "ESRI Shapefile",
                              out_schema, crs=self.crs) as out_collection:
            for in_feature in coll:
                out_feature = in_feature.copy()
                if call:
                    geom = mapping(
                        getattr(shape(in_feature['geometry']),
                                method)(*args, **kwargs)
                    )
                else:
                    # it's not a method, it's a property
                    geom = mapping(
                        getattr(shape(in_feature['geometry']), method)
                    )

                out_feature['geometry'] = geom
                out_collection.write(out_feature)
        return Layer(tempds)
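The getattr dispatch used above, shown in isolation as a sketch independent of the Layer class: the same code path resolves either a shapely method (callable) or a property:

from shapely.geometry import mapping, shape

geom = shape({'type': 'Point', 'coordinates': (0.0, 0.0)})
print(mapping(getattr(geom, 'buffer')(1.0)))  # method: geom.buffer(1.0)
print(mapping(getattr(geom, 'centroid')))     # property: geom.centroid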
Example #5
def make_shapefile(data, name):
    path = os.path.join('op_data', name + '.shp')
    crs = from_epsg('29902')
    if isinstance(data, dict):
        a_schema = {'geometry': 'Point',
                    'properties': {'name': 'str', 'address': 'str'}}
        with fiona.open(path, "w",
                        driver='ESRI Shapefile',
                        crs=crs,
                        schema=a_schema) as output:
            for k, v in data.items():
                parts = k.split(',')
                name = parts[0]
                output.write({'properties': {'name': name, 'address': k},
                              'geometry': geometry.mapping(v)})
    else:
        geom_type = data.geom_type
        a_schema = {'geometry': geom_type,
                    'properties': {'name': 'str'}}
        with fiona.open(path, "w",
                        driver='ESRI Shapefile',
                        crs=crs,
                        schema=a_schema) as output:
            output.write({'properties': {'name': name},
                          'geometry': geometry.mapping(data)})
Example #6
def delineate(bfe_filename, bfe_elev_field, contour_filename, contour_elev_field, out_filename):
    # Import contours ------------------------------------
    print('Importing contours... ')
    now = dt.now()
    contours = ad.import_contours(contour_filename, contour_elev_field, chatty=True)
    crs = ad.get_crs(contour_filename)
    time_diff = dt.now() - now
    print('Imported', len(contours), 'contours in', time_diff, 'seconds')

    # Import BFEs ----------------------------------------------
    bfes = ad.ez_bfe_import(bfe_filename, bfe_elev_field)
    num_bfes = len(bfes)-1
    print('Imported bfes from', bfes[0].elevation, 'to', bfes[-1].elevation)

    # Delineate ---------------------------------------------
    now = dt.now()
    left_lines, right_lines = ad.delineate_by_bfes(bfes, contours)
    time_diff = dt.now() - now
    print(num_bfes, 'bfe pairs completed in', time_diff)
    print(time_diff / num_bfes, 'seconds per bfe pair')

    # Export to shapefile
    schema = {'geometry': 'LineString', 'properties': {'status': 'str:25'}}
    with fiona.open(out_filename, 'w', driver='ESRI Shapefile', crs=crs, schema=schema) as out:
        for left in left_lines:
            out.write({'geometry': mapping(left.shapely_geo), 'properties': {'status': left.status}})
        for right in right_lines:
            out.write({'geometry': mapping(right.shapely_geo), 'properties': {'status': right.status}})
    print('Finished exporting to shapefile')
Example #7
def test_rasterize():
    geodict = GeoDict({'xmin':0.5,'xmax':3.5,
                       'ymin':0.5,'ymax':3.5,
                       'dx':1.0,'dy':1.0,
                       'ny':4,'nx':4})
    print('Testing rasterizeFromGeometry() burning in values from a polygon sequence...')
    #Define two simple polygons and assign them to shapes
    poly1 = [(0.25,3.75),(1.25,3.25),(1.25,2.25)]
    poly2 = [(2.25,3.75),(3.25,3.75),(3.75,2.75),(3.75,1.50),(3.25,0.75),(2.25,2.25)]
    shape1 = {'properties':{'value':5},'geometry':mapping(Polygon(poly1))}
    shape2 = {'properties':{'value':7},'geometry':mapping(Polygon(poly2))}
    shapes = [shape1,shape2]
    print('Testing burning in values where polygons need not contain pixel centers...')
    grid = Grid2D.rasterizeFromGeometry(shapes,geodict,fillValue=0,attribute='value',mustContainCenter=False)
    output = np.array([[5,5,7,7],
                       [5,5,7,7],
                       [0,0,7,7],
                       [0,0,0,7]])
    np.testing.assert_almost_equal(grid.getData(),output)
    print('Passed burning in values where polygons need not contain pixel centers.')

    print('Testing burning in values where polygons must contain pixel centers...')
    grid2 = Grid2D.rasterizeFromGeometry(shapes,geodict,fillValue=0,attribute='value',mustContainCenter=True)
    output = np.array([[5,0,7,0],
                       [0,0,7,7],
                       [0,0,0,7],
                       [0,0,0,0]])
    np.testing.assert_almost_equal(grid2.getData(),output)
    print('Passed burning in values where polygons must contain pixel centers.')
Example #8
def create_t6_deserts(desert_geom, b_box, mask_metadata):
    """"""

    geom_list = list()
    with fiona.open(t6_block_groups) as block_groups:
        t6_metadata = block_groups.meta.copy()

        with fiona.open(t6_desert_feats, 'w', **t6_metadata) as t6_deserts:
            for bg in block_groups:
                geom = shape(bg['geometry'])
                props = bg['properties']

                # 'neither' is misspelled in dataset so (sic)
                if props['min_pov'] != 'niether' and \
                        geom.intersects(desert_geom):
                    geom_list.append(geom)

                    new_geom = geom.intersection(desert_geom)
                    bg['geometry'] = mapping(new_geom)
                    t6_deserts.write(bg)

    t6_geom = unary_union(geom_list)
    t6_desert_geom = t6_geom.intersection(desert_geom)
    t6_mask_geom = b_box.difference(t6_desert_geom)

    with fiona.open(t6_desert_mask, 'w', **mask_metadata) as t6_mask:
        feat = {
            'geometry': mapping(t6_mask_geom),
            'properties': {
                'id': 1
            }
        }
        t6_mask.write(feat)
Example #9
def olson_transform(geojson, scale_values):
    """
    Scale GeoJSON features.

    Inplace scaling transformation of each polygon of the geojson provided
    according to the "scale values" also provided.

    Parameters
    ----------
    geojson: dict
        The geojson of polygon to transform
        (it might be useful to have chosen an appropriate projection, as we
        want to deal with the area)
    scale_values: list
        The pre-computed scale values for olson transformation
        (1 = no transformation)

    Returns
    -------
        Nothing
    """
    if len(geojson["features"]) != len(scale_values):
        raise ValueError("Inconsistent number of features/values")
    for val, feature in zip(scale_values, geojson['features']):
        geom = shape(feature["geometry"])
        feature['properties']['ref_area'] = geom.area
        if hasattr(geom, '__len__'):
            feature["geometry"] = mapping(
                MultiPolygon([scale(g, xfact=val, yfact=val) for g in geom]))
        else:
            feature["geometry"] = mapping(scale(geom, xfact=val, yfact=val))
    geojson['features'].sort(
        key=lambda x: x['properties']['ref_area'], reverse=True)
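A minimal sketch of calling olson_transform on a one-feature GeoJSON-like dict (values made up; assumes the same shapely imports the function itself uses):

from shapely.geometry import box, mapping

gj = {'features': [{'geometry': mapping(box(0, 0, 1, 1)),
                    'properties': {}}]}
olson_transform(gj, [2.0])
print(gj['features'][0]['properties']['ref_area'])  # 1.0, the pre-scaling area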
Example #10
def get_employers_near_stops():
    """"""

    distance = 5280 / 2
    filter_field = 'LINE'
    filter_vals = ['O']

    stop_buffers = dict()
    stop_names = dict()
    with fiona.open(RAIL_STOP) as rail_stop:
        for fid, feat in rail_stop.items():
            fields = feat['properties']
            if fields[filter_field] in filter_vals:
                geom = shape(feat['geometry'])
                feat['geometry'] = mapping(geom.buffer(distance))
                stop_buffers[fid] = feat
                stop_names[fid] = fields['STATION']

    wb = load_workbook(GEOCODED)
    ws = wb.worksheets[0]

    header = [str(cell.value) for cell in ws.rows[0]]
    x_ix = header.index('X Coordinate')
    y_ix = header.index('Y Coordinate')

    employers = dict()
    for i, row in enumerate(ws.iter_rows(row_offset=1)):
        x, y = row[x_ix].value, row[y_ix].value
        if x and y:
            feat = dict()
            x, y = float(x), float(y)
            feat['geometry'] = mapping(Point(x, y))
            field_vals = [cell.value for cell in row]
            feat['properties'] = OrderedDict(zip(header, field_vals))
            employers[i] = feat

    join_mapping = spatial_join(stop_buffers, employers)

    with open(EMP_STATIONS, 'wb') as emp_stations:
        emp_writer = csv.writer(emp_stations)
        station_header = ['Stations'] + header
        emp_writer.writerow(station_header)

        for i, row in enumerate(ws.iter_rows(row_offset=1)):
            if i in join_mapping:
                station_ids = join_mapping[i]
                station_str = ', '.join([stop_names[sid] for sid in station_ids])
                csv_row = [station_str]
                for cell in row:
                    value = cell.value
                    if isinstance(value, unicode):
                        value = value.encode('utf-8')
                    csv_row.append(value)

                emp_writer.writerow(csv_row)
Example #11
    def __init__(self, data_footprint: List[Tuple[float, float]]):
        """Construct data and tile footprints using the points from product metadata

        Points are assumed to be in ll, lr, ur, ul order
        """

        data_poly = normalize_footprint(data_footprint + [data_footprint[0]])
        tile_poly = MultiPolygon([x.envelope for x in data_poly])
        self.data_polygon = mapping(data_poly)
        self.tile_polygon = mapping(tile_poly)
Example #12
def get_tile_geometry(path, origin_espg, tolerance=500):
    """ Calculate the data and tile geometry for sentinel-2 tiles """

    with rasterio.open(path) as src:

        # Get tile geometry
        b = src.bounds
        tile_shape = Polygon([(b[0], b[1]), (b[2], b[1]), (b[2], b[3]), (b[0], b[3]), (b[0], b[1])])
        tile_geojson = mapping(tile_shape)

        # read first band of the image
        image = src.read(1)

        # create a mask of zero values
        mask = image == 0.

        # generate shapes of the mask
        novalue_shape = shapes(image, mask=mask, transform=src.affine)

        # generate polygons using shapely
        novalue_shape = [Polygon(s['coordinates'][0]) for (s, v) in novalue_shape]

        if novalue_shape:

            # Make sure polygons are united
            # also simplify the resulting polygon
            union = cascaded_union(novalue_shape)

            # generates a geojson
            data_shape = tile_shape.difference(union)

            # If there are multipolygons, select the largest one
            if data_shape.geom_type == 'MultiPolygon':
                areas = {p.area: i for i, p in enumerate(data_shape)}
                largest = max(areas.keys())
                data_shape = data_shape[areas[largest]]

            # if the polygon has interior rings, remove them
            if list(data_shape.interiors):
                data_shape = Polygon(data_shape.exterior.coords)

            data_shape = data_shape.simplify(tolerance, preserve_topology=False)
            data_geojson = mapping(data_shape)

        else:
            data_geojson = tile_geojson

        # convert coordinates to degrees
        return (to_latlon(tile_geojson, origin_espg), to_latlon(data_geojson, origin_espg))
Example #13
def get_bbox_or_point(bbox):
    """
    Determine whether the bounds are a single point or bounding box area
    """
    # first check if coordinates are within valid bounds
    if (all(abs(x) <= 180 for x in bbox[::2]) and
            all(abs(y) <= 90 for y in bbox[1::2])):
        if len(bbox) == 4 and bbox[0:2] == bbox[2:4]:
            return mapping(Point(bbox[0:2]))
        else:
            # d3 expects poly coordinates in clockwise order (?)
            return mapping(box(*bbox, ccw=False))
    else:
        # If the point/bbox lies outside of valid bounds, don't generate
        # geojson
        return None
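Illustrative calls with made-up coordinates:

print(get_bbox_or_point([10.0, 20.0, 10.0, 20.0]))  # degenerate bbox -> Point
print(get_bbox_or_point([10.0, 20.0, 11.0, 21.0]))  # real extent -> clockwise box
print(get_bbox_or_point([200.0, 0.0, 210.0, 1.0]))  # out of range -> None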
Example #14
def feature(i, row):
    return {
        "id": str(i),
        "type": "Feature",
        "properties": dict((k, v) for k, v in iteritems(row) if k != "geometry"),
        "geometry": mapping(row["geometry"]),
    }
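For illustration, `row` can be any mapping that stores a shapely geometry under the "geometry" key (a plain dict here; `iteritems` comes from six):

from six import iteritems
from shapely.geometry import Point

print(feature(0, {'name': 'site', 'geometry': Point(1.0, 2.0)}))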
Example #15
def get_ibound_from_huc(huc_data_shp=None,ibound_output_shp=None,ibound_huc_scale=None,extents_huc_list=None):
    '''Reduces the shapefile used for the ibound zonation scheme to only those
    polygons (i.e., HUCs) that are in the active model area.  For higher resolution
    zonation (i.e., using smaller HUCs for the IBOUND zonation scheme) this is much
    faster than clipping the zonation shapefile before rasterizing.'''

    print('\nReducing the IBOUND zonation shapefile to the active model area.\n')
    
    zone_id_field = 'HUC' + str(ibound_huc_scale)
    
    with fiona.open(huc_data_shp,'r') as vin:
        
        driver,crs,schema = vin.driver,vin.crs,vin.schema
        schema['properties'] = {zone_id_field:'str'}
        
        with fiona.open(ibound_output_shp,'w',driver=driver,crs=crs,schema=schema) as vout:
        
            for feature in vin:
                izone = int(feature['properties']['HUC' + str(ibound_huc_scale)])
                check_izone = str(izone).zfill(int(ibound_huc_scale))
                
                for ihuc in extents_huc_list:
                    if (check_izone.startswith(ihuc)):
                        
                        igeometry = shape(feature['geometry'])
                        vout.write({'geometry': mapping(igeometry),'properties':{zone_id_field:izone}})

    return
Example #16
def get_extents_from_huc(huc_data_shp=None,extents_output_shp=None,extents_huc_list=None):
    '''Extracts a user-specified HUC or list of HUCs from the national dataset and writes it
    to a shapefile. 'huc_data_shp'=shapefile that includes the huc polygons
    that will be extracted.'''
       
    extents_huc_scale = len(extents_huc_list[0])
    huc_field = 'HUC' + str(extents_huc_scale)
    
    with fiona.open(huc_data_shp) as vin:
        schema = vin.schema
        crs = vin.crs
        driver = vin.driver        
    
    # Reduce the extract schema to only the huc id field    
    schema['properties'] = {huc_field:'str'}
    
    # Now write the model domain shapefile
    with fiona.open(huc_data_shp) as vect_in:
        polygon_list = []
        for feature in vect_in:
            if feature['properties'][huc_field] in extents_huc_list:
                polygon_list.append(shape(feature['geometry']))
        # union once, after collecting all matching polygons (the original
        # recomputed it inside the loop on every match)
        merged = unary_union(polygon_list)

    with fiona.open(extents_output_shp,'w',driver=driver,crs=crs,schema=schema) as extract_out:
        extract_out.write({'geometry': mapping(merged),'properties':{huc_field:'Merged'}})
    
    return
Example #17
    def remap_node_number(node_atrb1, node_atrb2, inshp, outshp, type, prj, error_logfile):
        with fiona.collection(inshp, "r") as input:

            schema = {'geometry': type,
                      'properties': {'node': 'int'}}

            with fiona.collection(outshp, "w", "ESRI Shapefile", schema) as output:
                for node in input:
                    node_num1 = node['properties'][node_atrb1]
                    node_num2 = node['properties'][node_atrb2]

                    # pick a node number
                    if node_num1 == 0 or node_num1 == node_num2:
                        node_num = node_num2
                    elif node_num2 == 0:
                        node_num = node_num1
                    else:
                        error_logfile.write("Warning! node number conflict. MFgrid node number: {}, "
                                  "Existing SFR node number: {}\n".format(node_num1, node_num2))

                    print "\rnode {:d}".format(node_num),

                    output.write({'properties': {'node': node_num},
                                  'geometry': mapping(shape(node['geometry']))})
        # copy over prj file
        shutil.copyfile(prj, "{}.prj".format(outshp[:-4]))
Example #18
def createShapefileFromCSV(inCSV):
    #need dataframe as df
    #need uid as unique ID
    #need lng field as longitude
    #need lat field as latitude
    
    df = pd.read_csv(inCSV)
    ioSHP = inCSV.replace('.csv', '.shp')
    data = df

    lng = 'lng'
    lat = 'lat'

    schema = { 'geometry': 'Point', 'properties': { 'uid': 'str','lat':'float','lng':'float'} }

    with collection(ioSHP, "w", "ESRI Shapefile", schema) as output:
        for index, row in data.iterrows():
            point = Point(row[lng], row[lat])
            output.write({

                'properties': {'uid': row['uid'],'lat': row['lat'],'lng': row['lng']},
                'geometry': mapping(point)
            })

    print('shapefile created')
Example #19
  def buffer(self, toBuffer, outFile, distance, dissolve):

    with fiona.open(toBuffer, 'r') as input:
      schema = input.schema
      crs = input.crs
      schema['geometry'] = 'Polygon'
      
      buf_features = []
      for f in input:   
        buf_features.append(( shape(f['geometry']).buffer(distance), f['properties'] ))
      
      if dissolve:
        buf_features = cascaded_union([geom for geom, prop in buf_features])
        schema = {'geometry':buf_features.geom_type, 'properties':{'fid':'int'}}
        buf_features = [(buf_features, {'fid':'1'})]
   

    #in windows compiled shapely library python crashes if str has 255 characters
    #works without this block in source compiled versions
    #--------------------------------------------------
    for k, v in schema['properties'].items():
      if v[0:3] == 'str' and v[-3:] == '255':
        schema['properties'][k] = 'str:254'
    #--------------------------------------------------
   
    with fiona.open(outFile, 'w', 'ESRI Shapefile', crs=crs, schema=schema) as output:
      for geom, prop in buf_features: 
        output.write({'geometry': mapping(geom), 'properties':prop})
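The dissolve step in isolation, as a sketch in the shapely<2 style used above (cascaded_union has since been superseded by shapely.ops.unary_union):

from shapely.geometry import Point
from shapely.ops import cascaded_union

bufs = [Point(0, 0).buffer(1.5), Point(1, 0).buffer(1.5)]
merged = cascaded_union(bufs)
print(merged.geom_type)  # 'Polygon': the overlapping buffers dissolve into one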
Example #20
File: utils.py  Project: Enaith/oggm
def feature(i, row):
    return {
        'id': str(i),
        'type': 'Feature',
        'properties':
            dict((k, v) for k, v in iteritems(row) if k != 'geometry'),
        'geometry': mapping(row['geometry'])}
Example #21
def build_sparse_grid(tweet_collection,
                      grid_collection,
                      skip_users=True,
                      drop=True):
    """
    Takes about 30 minutes.

    """
    if drop:
        grid_collection.drop()

    hci = twitterproj.hashtag_counts_in
    if skip_users:
        uids = twitterproj.subcollections.get_skip_users()
    else:
        uids = None

    for i, cell in enumerate(us_grid()):
        counts, skipped = hci(tweet_collection, cell, uids)
        doc = OrderedDict()
        doc['geometry'] = mapping(cell)
        doc['counts'] = counts
        # increase latitudes to max lat, then increases longitude
        doc['_id'] = i
        try:
            grid_collection.insert(doc)
        except pymongo.errors.DocumentTooLarge:
            # Since we are specifying a unique id, let the error raise.
            raise
Example #22
def parse_tcx3(infiles):
    schema = { 'geometry': 'LineString', 'properties': {} }
    with collection(
        "lines.shp", "w", "ESRI Shapefile", schema) as output:
        for infile in infiles:
            print "processing %s" % infile
            soup = bss(open(infile,'r'))

            ls = []
            # Activity
            for activity in soup.findAll('activity'):

                # Lap
                for lap in activity.findAll('lap'):
                    # Track
                    for track in lap.findAll('track'):

                        # Trackpoint
                        for point in track.findAll('trackpoint'):
                            try:
                                coords = [float(x) for x in
                                         [point.position.longitudedegrees.string,
                                          point.position.latitudedegrees.string]]
                                ls.append(coords)
                            except: coords = None
            if len(ls) > 2:
                output.write({
                    'properties': {
                    },
                    'geometry': mapping(LineString(ls))
                })
Example #23
    def insertfeatures(self, features):
        trees = defaultdict(list)
        geomNames = {}
        for path, attrs in features.items():
            dest = os.path.dirname(path)
            fid = os.path.basename(path)
            filteredAttrs = {}
            geom = None
            for attrName, attrValue in attrs.items():
                if isinstance(attrValue, BaseGeometry):
                    if geom is not None:
                        raise GeoGitException("Cannot insert feature with more than one geometry attribute")
                    geom = attrValue
                    geomNames[dest] = attrName
                else:
                    filteredAttrs[attrName] = attrValue
            geommap = mapping(geom)
            trees[dest].append(geojson.Feature(id=fid, geometry=geommap, properties=filteredAttrs))
        for tree, features in trees.items():
            fco = geojson.FeatureCollection(features=features)
            json = geojson.dumps(fco)
            try:
                f = tempfile.NamedTemporaryFile(delete=False)
                f.write(json)
                f.close()
                self.importgeojson(f.name, add=True, dest=tree, geomName=geomNames.get(tree))
            finally:
                f.close()
                try:
                    os.remove(f.name)
                except OSError:
                    pass
Example #24
    def process_file(self, inFile, outFile, region):
        # process file is the final processing step which writes to the user-defined outputFile
        with fiona.open(inFile, 'r', encoding='utf-8') as input:
            input_driver = input.driver
            input_crs = input.crs
            input_schema = input.schema.copy()
            input_schema['properties']['shield_typ'.encode("utf-8")] = 'str:254'
            input_schema['properties']['label'.encode("utf-8")] = 'str:254'
            input_schema['properties']['seg_len'] = 'int:10'
            input_schema['properties']['label_len'] = 'int:10'
            with fiona.open(outFile, 'w', driver=input_driver, crs=input_crs, schema=input_schema, encoding='utf-8') as output:
                for item in input:
                    shield_val = self.create_shield_type(item,region)
                    item['properties']['shield_typ'] = shield_val
                    label_val = self.create_label(item,region)
                    item['properties']['label'] = label_val
                    segment_length_val = shape(item['geometry']).length
                    item['properties']['seg_len'] = segment_length_val
                    # remove items that have no value in the label field
                    if label_val is None:
                        continue
                    # measure the length of characters in the label field
                    label_length_val = len(label_val)
                    # for USA region only, remove items that have a label length >5 or = 0
                    if region == "USA" and (label_length_val > 5 or label_length_val == 0):
                        continue
                    item['properties']['label_len'] = label_length_val

                    output.write({'properties': item['properties'],'geometry': mapping(shape(item['geometry']))})
Example #25
def poly_point_simplify(features, min_area):
    for feature in features:
        geom = asShape(feature['geometry'])
        if geom.area < min_area:
            newgeom = geom.centroid
            feature['geometry'] = mapping(newgeom)
        yield feature
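A small driver for the generator above (the feature dict is made up; min_area is in the units of the data's CRS):

from shapely.geometry import box, mapping

feats = [{'geometry': mapping(box(0, 0, 0.1, 0.1)), 'properties': {}}]
for f in poly_point_simplify(feats, min_area=1.0):
    print(f['geometry']['type'])  # 'Point': the small polygon collapsed to its centroid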
Example #26
    def dissolve(self, inFile, outFile):
        # create dictionary for storing the uniqueRefs
        uniqueRefs = {}
        with fiona.open(inFile, 'r', encoding='utf-8') as input:
            input_driver = input.driver
            input_crs = input.crs
            input_schema = {'geometry': 'MultiLineString', 'properties': {'ref'.encode("utf-8"): 'str:254'}}
            with fiona.open(outFile, 'w', driver=input_driver, crs=input_crs, schema=input_schema, encoding='utf-8') as output:
                for item in input:
                    # extract the key; if the 'ref' attribute is NOT called 'ref'
                    # you can insert the different attribute name HERE (and only HERE).
                    key = item['properties']['ids_and_re']
                    geom = shape(item['geometry'])
                    # find all motorways within the New Zealand mainland
                    # and remove all letters from their refs (keep digits only)
                    newZeaBox = [(17920614.01, -4033681.682), (20362002, -4054837.565),
                                 (20357771.35, -6073108.484), (17683668.157, -6068877.308)]
                    newZeaPoly = Polygon(newZeaBox)
                    if geom.within(newZeaPoly):
                        key = re.sub(r'\D', "", key)
                    if not geom.type.startswith('Multi'):
                        geom = [geom]
                    for g in geom:
                        if key in uniqueRefs:
                            uniqueRefs[key].append(g)
                        else:
                            uniqueRefs[key] = [g]
                for key in uniqueRefs:
                    # omit lines that have blank 'ref' tags
                    if key is not None and key != 'None':
                        dissolve_feat = cascaded_union(uniqueRefs[key])
                        output.write({'geometry': mapping(dissolve_feat), 'properties': {'ref': key}})
Example #27
def _standardize_value(v):
    if v is None:
        return v
    if isinstance(v, bool):
        return bool(v)
    if is_int(v):
        return int(v)
    if isinstance(v, str):
        return str(v)
    if is_float(v):
        return float(v)
    if isinstance(v, dict) or (pandas and isinstance(v, pandas.DataFrame)):
        return _standardize_dict(v)
    if isinstance(v, list):
        return [_standardize_value(elem) for elem in v]
    if isinstance(v, tuple):
        return tuple(_standardize_value(elem) for elem in v)
    if (numpy and isinstance(v, numpy.ndarray)) or (pandas and isinstance(v, pandas.Series)):
        return _standardize_value(v.tolist())
    if isinstance(v, datetime):
        return v.timestamp() * 1000  # convert from second to millisecond
    if isinstance(v, CanToDataFrame):
        return _standardize_dict(v.to_data_frame())
    if is_shapely_geometry(v):
        from shapely.geometry import mapping
        return json.dumps(mapping(v))
    try:
        return repr(v)
    except Exception as e:
        # TODO This needs a test case; Also exception should be logged somewhere
        raise Exception('Unsupported type: {0}({1})'.format(v, type(v))) from e
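A few representative conversions, assuming the helper predicates (is_int, is_float, is_shapely_geometry) from the surrounding module:

from shapely.geometry import Point

print(_standardize_value((1, 2.5, 'x')))  # (1, 2.5, 'x'), converted element-wise
print(_standardize_value(Point(0, 0)))    # GeoJSON text via mapping() + json.dumps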
Example #28
def linestrings_to_shapefile(linestrings, shp_fname):
    schema_properties = OrderedDict(
        [
            ("gid", "int")
        ]
    )

    my_schema = {
        "geometry": "LineString",
        "properties": schema_properties
    }

    my_driver = "ESRI Shapefile"
    my_crs = from_epsg(27700)

    with fiona.open(shp_fname, "w", driver=my_driver, crs=my_crs, schema=my_schema) as outpf:
        for gid in linestrings:
            linestring = linestrings[gid]

            outpf.write({
                "geometry": mapping(linestring),
                "properties": {
                    "gid": gid
                }
            })
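An illustrative call with a made-up gid-to-geometry dict:

from shapely.geometry import LineString

linestrings_to_shapefile({1: LineString([(0, 0), (10, 10)])}, 'lines.shp')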
Example #29
def shp_writer(model_shp, geoDF, output_shp):
    with fiona.open(model_shp) as source:
        source_driver = source.driver
        source_crs = source.crs
        source_schema = source.schema
        #previous fields in properties are deleted
        del source_schema['properties']
        # a new field is set with its respective data type
        source_schema['properties'] = {'mxint15min': 'float'}
        #writing a new file    
        with fiona.open(output_shp,
                        'w',
                        driver=source_driver,
                        crs=source_crs,
                        schema=source_schema) as collection:
            #rec = {'geometry': mapping(geoDF.loc[0].polygon),'properties':{'mxrai15min': 0.5}}
            #collection.write(rec)
            for i in geoDF.index:
                #create a record
                rec = {}
                #fill geometry
                rec['geometry'] = mapping(geoDF.loc[i].polygon)
                #fill attribute values
                intensity = float(geoDF.loc[i].maxrain_15min)
                rec['properties'] = {'mxint15min': intensity}
                collection.write(rec)
Example #30
def extract_contours(array, tile, interval=100, pixelbuffer=0, field='elev'):
    """
    Extracts contour lines from an array in a given interval and returns them
    as GeoJSON-like objects using the source tile bounds as georeference
    """
    levels = _get_contour_values(array.min(), array.max(), interval=interval)
    if not levels:
        return []
    contours = plt.contour(array, levels)
    out_contours = []
    left = tile.bounds(pixelbuffer)[0]
    top = tile.bounds(pixelbuffer)[3]
    for level_index, level_collection in enumerate(contours.collections):
        elevation = levels[level_index]
        paths = level_collection.get_paths()
        for path in paths:
            out_coords = [(
                left + (i[1] * tile.pixel_x_size),
                top - (i[0] * tile.pixel_y_size),
            ) for i in zip(path.vertices[:, 1], path.vertices[:, 0])]
            if len(out_coords) >= 2:
                line = LineString(out_coords)
                out_contours.append({
                    'properties': {
                        field: elevation
                    },
                    'geometry': mapping(line)
                })

    return out_contours
Example #31
    def query_iteratively(self,
                          searchAreaWkt,
                          query,
                          count=100,
                          ttl='10s',
                          index=default_index):
        '''
        Perform a vector services query using the QUERY API
        (https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)

        If iterating through a page of results results in seeing duplicate records consistently,
        it's possible that the query context TTL is expiring before the page is finished being
        processed by the caller.  In that case, it's possible to raise the TTL duration by setting
        the 'ttl' parameter to something higher than the default of 10 seconds.  For example, to
        set the TTL to 30 seconds, use '30s'.  For one minute, use '1m'.

        Args:
            searchAreaWkt: WKT Polygon of area to search
            query: Elastic Search query
            count: Maximum number of results to return
            ttl: Amount of time for each temporary vector page to exist

        Returns:
            generator of vector results
    
        '''

        search_area_polygon = load_wkt(searchAreaWkt)
        geojson = json.dumps(mapping(search_area_polygon))

        params = {
            "q": query,
            "count": min(count, 1000),
            "ttl": ttl,
        }

        # initialize paging request
        url = self.query_index_page_url % index if index else self.query_page_url
        r = self.gbdx_connection.post(url, params=params, data=geojson)
        r.raise_for_status()
        page = r.json()
        paging_id = page['next_paging_id']
        item_count = int(page['item_count'])
        data = page['data']

        num_results = 0
        for vector in data:
            num_results += 1
            if num_results > count: break
            yield vector

        if num_results == count:
            return

        # get vectors from each page
        while paging_id and item_count > 0 and num_results < count:

            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            data = {"pagingId": paging_id, "ttl": ttl}

            r = self.gbdx_connection.post(self.page_url,
                                          headers=headers,
                                          data=data)
            r.raise_for_status()
            page = r.json()
            paging_id = page['next_paging_id']
            item_count = int(page['item_count'])
            data = page['data']

            for vector in data:
                num_results += 1
                if num_results > count: break
                yield vector
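A hypothetical call: `vectors` stands for an instance of the class above, and the query string is a placeholder (a configured gbdx_connection is required):

wkt = 'POLYGON ((-105 40, -104 40, -104 41, -105 41, -105 40))'
for item in vectors.query_iteratively(wkt, query='item_type:tweet', count=10):
    print(item['properties'])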
Example #32
def save_image_crop(image_uri,
                    image_crop_uri,
                    label_uri=None,
                    label_crop_uri=None,
                    size=600,
                    min_features=10):
    """Save a crop of an image to use for testing.

    If label_uri is set, the crop needs to cover >= min_features.

    Args:
        image_uri: URI of original image
        image_crop_uri: URI of cropped image to save
        label_uri: optional URI of GeoJSON file
        size: height and width of crop

    Raises:
        ValueError: if no crop satisfying the min_features constraint can be found.
    """
    if not file_exists(image_crop_uri):
        print('Saving test crop to {}...'.format(image_crop_uri))
        old_environ = os.environ.copy()
        try:
            request_payer = S3FileSystem.get_request_payer()
            if request_payer == 'requester':
                os.environ['AWS_REQUEST_PAYER'] = request_payer
            im_dataset = rasterio.open(image_uri)
            h, w = im_dataset.height, im_dataset.width

            extent = Box(0, 0, h, w)
            windows = extent.get_windows(size, size)
            if label_uri is not None:
                crs_transformer = RasterioCRSTransformer.from_dataset(
                    im_dataset)
                vs = GeoJSONVectorSource(label_uri, crs_transformer)
                geojson = vs.get_geojson()
                geoms = []
                for f in geojson['features']:
                    g = shape(f['geometry'])
                    geoms.append(g)
                tree = STRtree(geoms)

            def p2m(x, y, z=None):
                return crs_transformer.pixel_to_map((x, y))

            for w in windows:
                use_window = True
                if label_uri is not None:
                    w_polys = tree.query(w.to_shapely())
                    use_window = len(w_polys) >= min_features
                    if use_window and label_crop_uri is not None:
                        print('Saving test crop labels to {}...'.format(
                            label_crop_uri))

                        label_crop_features = [
                            mapping(shapely.ops.transform(p2m, wp))
                            for wp in w_polys
                        ]
                        label_crop_json = {
                            'type':
                            'FeatureCollection',
                            'features': [{
                                'geometry': f
                            } for f in label_crop_features]
                        }
                        json_to_file(label_crop_json, label_crop_uri)

                if use_window:
                    w = w.rasterio_format()
                    im = im_dataset.read(window=w)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        crop_path = get_local_path(image_crop_uri, tmp_dir)
                        make_dir(crop_path, use_dirname=True)

                        meta = im_dataset.meta
                        meta['width'], meta['height'] = size, size
                        meta['transform'] = rasterio.windows.transform(
                            w, im_dataset.transform)

                        with rasterio.open(crop_path, 'w', **meta) as dst:
                            dst.colorinterp = im_dataset.colorinterp
                            dst.write(im)

                        upload_or_copy(crop_path, image_crop_uri)
                    break

            if not use_window:
                raise ValueError('Could not find a good crop.')
        finally:
            os.environ.clear()
            os.environ.update(old_environ)
Example #33
import fiona
from fiona.crs import from_epsg
from shapely.geometry import Point, mapping


# In[7]:

simpleschema = {'geometry': 'Point',
                'properties': {'name': 'str'}}
wgs84_dir = os.path.join(output_dir, "wgs84")
if not os.path.exists(wgs84_dir):
    os.makedirs(wgs84_dir)
camera_fname = os.path.join(wgs84_dir, "cameraloc.shp")
with fiona.open(camera_fname, 'w', crs=from_epsg(4326),driver='ESRI Shapefile', schema=simpleschema) as output:
    point = Point(site.x, site.y)
    output.write({'properties': {'name': site.sitename},'geometry': mapping(point)})


# # Download the landsat8 scene over this area

# In[8]:

from landsat.search import Search
from landsat.downloader import Downloader


# In[9]:

s = Search()
results = s.search(lat=site.y, lon=site.x, limit=100)
scene_id = results['results'][1]['sceneID']
Example #34
    def gdlToGisFile(self,
                     coords,
                     folderpath,
                     layername,
                     fmt="ESRI Shapefile",
                     epsg_cd=4326,
                     prop=None,
                     crtfld=True):
        """
        Dump geodesic line coords to ESRI Shapefile
        and GeoJSON Linestring Feature
            coords: input coords returned by gcComp.
            folderpath: folder to store output file.
            layername: output filename.
            fmt: output format ("ESRI Shapefile" (default), "GeoJSON").
            epsg_cd: Coordinate Reference System, EPSG code (default: 4326)
            prop: property value written to each output feature (default: None).
            crtfld: create folder if not exists (default: True).
        """

        schema = {'geometry': 'LineString', 'properties': {'prop': 'str'}}

        try:

            if fmt in ["ESRI Shapefile", "GeoJSON"]:
                ext = ".shp"
                if fmt == "GeoJSON":
                    ext = ".geojson"

                filepath = os.path.join(folderpath,
                                        "{0}{1}".format(layername, ext))

                self.__dest_folder(folderpath, crtfld)

                if fmt == "GeoJSON" and os.path.isfile(filepath):
                    os.remove(filepath)

                out_crs = from_epsg(epsg_cd)

                with collection(filepath, "w", fmt, schema,
                                crs=out_crs) as output:

                    line = LineString(coords)

                    geom = mapping(line)

                    if self.__antimeridian:
                        line_t = self.__antiMeridianCut(geom)
                    else:
                        line_t = geom

                    output.write({
                        'properties': {
                            'prop': prop
                        },
                        'geometry': line_t
                    })

                self.__logger.info("{0} successfully created!".format(fmt))

            else:
                self.__logger.error("No format to store output...")
                return

        except Exception as e:
            self.__logger.error("Error: {0}".format(e))
            raise ExportGeodesicLineError(e)
Example #35
def create_centerlines(src, dst, interpolation_distance=0.5):
    """Convert the geometries from the ``src`` file to centerlines in
    the ``dst`` file.

    Use the ``interpolation_distance`` parameter to adjust the level of
    detail you want the centerlines to be produced with.

    Only polygons and multipolygons are converted to centerlines,
    whereas the other geometries are skipped. The polygon's attributes
    are copied to its ``Centerline`` object.

    If the ``interpolation_distance`` factor does not suit the polygon's
    geometry, the ``TooFewRidgesError`` error is logged as a warning.
    You should try readjusting the ``interpolation_distance`` factor and
    rerun the command.

    :param src: path to the file containing input geometries
    :type src: str
    :param dst: path to the file that will contain the centerlines
    :type dst: str
    :param interpolation_distance: densify the input geometry's
        border by placing additional points at this distance, defaults
        to 0.5 [meter].
    :type interpolation_distance: float, optional
    :return: ``dst`` file is generated
    :rtype: None
    """

    with fiona.Env():
        with fiona.open(src, mode="r") as source_file:
            schema = source_file.schema.copy()
            schema.update({"geometry": "MultiLineString"})
            driver = get_ogr_driver(filepath=dst)
            with fiona.open(
                    dst,
                    mode="w",
                    driver=driver.GetName(),
                    schema=schema,
                    crs=source_file.crs,
                    encoding=source_file.encoding,
            ) as destination_file:
                for record in source_file:
                    geom = record.get("geometry")
                    input_geom = shape(geom)

                    attributes = record.get("properties")
                    try:
                        centerline_obj = Centerline(input_geom,
                                                    interpolation_distance,
                                                    **attributes)
                    except (InvalidInputTypeError, TooFewRidgesError) as error:
                        logging.warning(error)
                        continue

                    centerline_dict = {
                        "geometry": mapping(centerline_obj),
                        "properties": {
                            k: v
                            for k, v in centerline_obj.__dict__.items()
                            if k in attributes.keys()
                        },
                    }

                    destination_file.write(centerline_dict)

    return None
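A hypothetical invocation; the file names are placeholders, and the output driver is inferred from the dst extension by get_ogr_driver:

create_centerlines('polygons.shp', 'centerlines.shp', interpolation_distance=1.0)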
Example #36
import pandas as pd
from shapely.geometry import Point, mapping
from fiona import collection

io1 = '/Users/danielmsheehan/Dropbox/GIS/Data/Municipal/USA/New_York/New_York_City/Pollen/NYCPS_lat_long_2013.csv'
io2 = '/Users/danielmsheehan/Dropbox/GIS/Data/Municipal/USA/New_York/New_York_City/Pollen/NYCPS_lat_long_2013_w.csv'

df = pd.read_csv(io1)
df.to_csv(io2, index=False)

schema = {
    'geometry': 'Point',
    'properties': {
        'SITEID': 'str'
    }
}  #{ 'city': 'str', 'zip': 'str' } }
#url = df #"http://goo.gl/WFylXY"
#data = pd.read_csv(url)
data = df
with collection(
        "/Users/danielmsheehan/Dropbox/GIS/Data/Municipal/USA/New_York/New_York_City/Pollen/nyc_pollen_sites.shp",
        "w", "ESRI Shapefile", schema) as output:
    for index, row in data.iterrows():
        point = Point(row['Longitude'], row['Latitude'])
        output.write({
            'properties': {
                'SITEID': row['SITEID']
            },  #{'city': row['city'], 'zip': row['zip_code']},
            'geometry': mapping(point)
        })
Example #37
import fiona
from fiona.crs import from_epsg
from shapely import wkb, wkt
from shapely.geometry import mapping

if __name__ == '__main__':

    geom_string = '0101000020110F00009548A88F1AD163C1061A928AEAEA4C41'

    wkb_bytes = bytes.fromhex(geom_string)

    shape = wkb.loads(wkb_bytes)

    # to wkt:
    print(wkt.dumps(shape))

    # POINT (-10389716.4892924223095179 3790293.0825836686417460)

    # OR, IN POSTGIS:  select ST_AsText('0101000020110F00009548A88F1AD163C1061A928AEAEA4C41'::geometry)

    # to shapefile:

    schema = {'geometry': "Point", "properties": {"ID": "str"}}

    with fiona.open('test.shp',
                    'w',
                    driver='ESRI Shapefile',
                    crs=from_epsg(3857),
                    schema=schema) as dst:
        record = {"geometry": mapping(shape), "properties": {"ID": "1"}}
        dst.write(record)
Example #38
def contour_extract(ds_array,
                    z_values,
                    ds_crs,
                    ds_affine,
                    output_shp,
                    min_vertices=2,
                    attribute_data=None,
                    attribute_dtypes=None,
                    dim='time',
                    verbose=True):
    """
    Uses `skimage.measure.find_contours` to extract multiple z-value contour lines from a two-dimensional array
    (e.g. multiple elevations from a single DEM), or one z-value for each array along a specified dimension of a 
    multi-dimensional array (e.g. to map waterlines across time by extracting a 0 NDVI contour from each individual 
    timestep in an xarray timeseries).    
    
    Contours are exported to file as a shapefile and returned as a geopandas geodataframe with one row per
    z-value or one row per array along a specified dimension. The `attribute_data` and `attribute_dtypes` parameters 
    can be used to pass custom attributes to the output contour features.

    Last modified: March 2019
    Author: Robbi Bishop-Taylor
    
    Parameters
    ----------  
    ds_array : xarray DataArray
        A two-dimensional or multi-dimensional array from which contours are extracted. If a two-dimensional array
        is provided, the analysis will run in 'single array, multiple z-values' mode which allows you to specify 
        multiple `z_values` to be extracted. If a multi-dimensional array is provided, the analysis will run in 
        'single z-value, multiple arrays' mode allowing you to extract contours for each array along the dimension
        specified by the `dim` parameter.  
    z_values : int, float or list of ints, floats
        An individual z-value or list of multiple z-values to extract from the array. If operating in 'single 
        z-value, multiple arrays' mode specify only a single z-value.
    ds_crs : string or CRS object
        Either a EPSG string giving the coordinate system of the array (e.g. 'EPSG:3577'), or a crs
        object (e.g. from an xarray dataset: `xarray_ds.geobox.crs`).
    ds_affine : affine.Affine object or GDAL geotransform
        Either an affine object from a rasterio or xarray object (e.g. `xarray_ds.geobox.affine`), or a gdal-derived
        geotransform object (e.g. `gdal_ds.GetGeoTransform()`) which will be converted to an affine.
    output_shp : string
        The path and filename for the output shapefile.
    min_vertices : int, optional
        The minimum number of vertices required for a contour to be extracted. The default (and minimum) value is 2,
        which is the smallest number required to produce a contour line (i.e. a start and end point). Higher values
        remove smaller contours, potentially removing noise from the output dataset.
    attribute_data : dict of lists, optional
        An optional dictionary of lists used to define custom attributes/fields to add to the shapefile. Dict keys 
        give the name of the shapefile field, while dict values must be lists of the same length as `z_values`
        (for 'single array, multiple z-values' mode) or the number of arrays along the dimension specified by the `dim`
        parameter (for 'single z-value, multiple arrays' mode). For example, if `z_values=[0, 10, 20]`, then 
        `attribute_data={'type': [1, 2, 3]}` can be used to create a shapefile field called 'type' with a value for
        each contour in the shapefile. The default is None, which produces a default shapefile field called 'z_value'
        with values taken directly from the `z_values` parameter and formatted as a 'float:9.2' ('single array, 
        multiple z-values' mode), or a field named after `dim` numbered from 0 to the total number of arrays along 
        the `dim` dimension ('single z-value, multiple arrays' mode).
    attribute_dtypes : dict, optional
        An optional dictionary giving the output dtype for each custom shapefile attribute field specified by
        `attribute_data`. For example, `attribute_dtypes={'type': 'int'}` can be used to set the 'type' field to an
        integer dtype. The dictionary should have the same keys/field names as declared in `attribute_data`.
        Valid values include 'int', 'str', 'datetime', and 'float:X.Y', where X is the minimum number of characters
        before the decimal place, and Y is the number of characters after the decimal place.
    dim : string, optional
        The name of the dimension along which to extract contours when operating in 'single z-value, multiple arrays'
        mode. The default is 'time', which extracts contours for each array along the time dimension.
    verbose: bool, optional
        Whether to print the result of each contour extraction to the console. The default is True which prints all
        results; set to False for a cleaner output, particularly when extracting large numbers of contours.

    Returns
    -------
    output_gdf : geopandas geodataframe
        A geopandas geodataframe object with one feature per z-value ('single array, multiple z-values' mode), or one
        row per array along the dimension specified by the `dim` parameter ('single z-value, multiple arrays' mode). 
        If `attribute_data` and `attribute_dtypes` are provided, these values will be included in the shapefile's
        attribute table.

    Example
    -------   
    >>> # Import modules
    >>> import sys
    >>> import datacube

    >>> # Import external dea-notebooks functions using relative link to Scripts directory
    >>> sys.path.append('../10_Scripts')
    >>> import SpatialTools

    >>> # Set up datacube instance
    >>> dc = datacube.Datacube(app='Contour extraction')

    ########################################
    # Single array, multiple z-values mode #
    ########################################
    
    >>> # Define an elevation query
    >>> elevation_query = {'lat': (-35.25, -35.35),
    ...                    'lon': (149.05, 149.17),
    ...                    'output_crs': 'EPSG:3577',
    ...                    'resolution': (-25, 25)}

    >>> # Import sample elevation data
    >>> elevation_data = dc.load(product='srtm_dem1sv1_0', **elevation_query)

    >>> # Extract contours
    >>> contour_gdf = SpatialTools.contour_extract(z_values=[600, 700, 800],
    ...                                            ds_array=elevation_data.dem_h,
    ...                                            ds_crs=elevation_data.geobox.crs,
    ...                                            ds_affine=elevation_data.geobox.affine,
    ...                                            output_shp='extracted_contours.shp')
    Dimension 'time' has length of 1; removing from array
    Operating in single array, multiple z-values mode
        Extracting contour 600
        Extracting contour 700
        Extracting contour 800
    Exporting contour shapefile to extracted_contours.shp
    
    ########################################
    # Single z-value, multiple arrays mode #
    ########################################
    
    >>> # Define a Landsat query
    >>> landsat_query = {'lat': (-35.25, -35.35),
    ...                  'lon': (149.05, 149.17),
    ...                  'time': ('2016-02-15', '2016-03-01'),
    ...                  'output_crs': 'EPSG:3577',
    ...                  'resolution': (-25, 25)}

    >>> # Import sample Landsat data
    >>> landsat_data = dc.load(product='ls8_nbart_albers', 
    ...                        group_by='solar_day',
    ...                        **landsat_query)
    
    >>> # Test that there are multiple arrays along the 'time' dimension
    >>> print(len(landsat_data.time))
    2

    >>> # Set up custom attributes to be added as shapefile fields
    >>> attribute_data = {'value': ['first_contour', 'second_contour']}
    >>> attribute_dtypes = {'value': 'str'}

    >>> # Extract contours
    >>> contour_gdf = SpatialTools.contour_extract(z_values=3000,
    ...                                            ds_array=landsat_data.red,
    ...                                            ds_crs=landsat_data.geobox.crs,
    ...                                            ds_affine=landsat_data.geobox.affine,
    ...                                            output_shp='extracted_contours.shp',
    ...                                            attribute_data=attribute_data,
    ...                                            attribute_dtypes=attribute_dtypes,
    ...                                            dim='time')
    Operating in single z-value, multiple arrays mode
        Extracting contour 0
        Extracting contour 1
    Exporting contour shapefile to extracted_contours.shp

    """

    # Obtain affine object from either rasterio/xarray affine or a gdal geotransform:
    if not isinstance(ds_affine, affine.Affine):
        ds_affine = affine.Affine.from_gdal(*ds_affine)

    # If the supplied z_values is not a list, convert to list before proceeding:
    z_values = z_values if isinstance(z_values, list) else [z_values]

    # If array has only one layer along the `dim` dimension (e.g. time), remove the dim:
    try:
        ds_array = ds_array.squeeze(dim=dim)
        print(f"Dimension '{dim}' has length of 1; removing from array")

    # Catch the errors xarray raises when `dim` is absent or has length > 1:
    except (KeyError, ValueError):
        pass

    ########################################
    # Single array, multiple z-values mode #
    ########################################

    # Output dict to hold contours for each offset
    contours_dict = collections.OrderedDict()

    # If array has only two dimensions, run in single array, multiple z-values mode:
    if len(ds_array.shape) == 2:

        print('Operating in single array, multiple z-values mode')

        # If no custom attributes given, default to including a single z-value field based on `z_values`
        if not attribute_data:

            # Default field uses two decimal points by default
            attribute_data = {'z_value': z_values}
            attribute_dtypes = {'z_value': 'float:9.2'}

        # If custom attributes are provided, test that they are equal in length to the number of `z-values`:
        else:

            for key, values in attribute_data.items():

                if len(values) != len(z_values):

                    raise Exception(
                        f"Supplied attribute '{key}' has length of {len(values)} while z_values has "
                        f"length of {len(z_values)}; please supply the same number of attribute values "
                        "as z_values")

        for z_value in z_values:

            # Extract contours and convert output array cell coords into arrays of coordinate reference system coords.
            # We need to add (0.5 x the pixel size) to x values and subtract (0.5 x the pixel size) from y values to
            # correct coordinates to give the centre point of pixels, rather than the top-left corner
            if verbose: print(f'    Extracting contour {z_value}')
            ps = ds_affine[0]  # Pixel size from the affine's x scale
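            # skimage's find_contours returns vertices as (row, col) pairs, hence the
            # (i[:, 1], i[:, 0]) swap below to get (col, row) before applying the affine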
            contours_geo = [
                np.column_stack(ds_affine * (i[:, 1], i[:, 0])) +
                np.array([0.5 * ps, -0.5 * ps])
                for i in find_contours(ds_array, z_value)
            ]

            # For each array of coordinates, drop any xy points that have NA
            contours_nona = [i[~np.isnan(i).any(axis=1)] for i in contours_geo]

            # Drop contours with fewer than `min_vertices` points and add list of contour arrays to dict
            contours_withdata = [
                i for i in contours_nona if len(i) >= min_vertices
            ]

            # If there is data for the contour, add to dict:
            if len(contours_withdata) > 0:
                contours_dict[z_value] = contours_withdata

            else:
                if verbose:
                    print(f'    No data for contour {z_value}; skipping')
                contours_dict[z_value] = None

    ########################################
    # Single z-value, multiple arrays mode #
    ########################################

    # For inputs with more than two dimensions, run in single z-value, multiple arrays mode:
    else:

        # Test if only a single z-value is given when operating in single z-value, multiple arrays mode
        print('Operating in single z-value, multiple arrays mode')
        if len(z_values) > 1:
            raise Exception('Please provide a single z-value when operating '
                            'in single z-value, multiple arrays mode')

        # If no custom attributes given, default to including one field based on the `dim` dimension:
        if not attribute_data:

            # Default field is numbered from 0 to the number of arrays along the `dim` dimension:
            attribute_data = {dim: range(0, len(ds_array[dim]))}
            attribute_dtypes = {dim: 'int'}

        # If custom attributes are provided, test that they are equal in length to the number of arrays along `dim`:
        else:

            for key, values in attribute_data.items():

                if len(values) != len(ds_array[dim]):

                    raise Exception(
                        f"Supplied attribute '{key}' has length of {len(values)} while there are "
                        f"{len(ds_array[dim])} arrays along the '{dim}' dimension. Please supply "
                        f"the same number of attribute values as arrays along the '{dim}' dimension"
                    )

        for z_value, _ in enumerate(ds_array[dim]):

            # Extract contours and convert output array cell coords into arrays of coordinate reference system coords.
            # We need to add (0.5 x the pixel size) to x values and subtract (0.5 x the pixel size) from y values to
            # correct coordinates to give the centre point of pixels, rather than the top-left corner
            if verbose: print(f'    Extracting contour {z_value}')
            ps = ds_affine[0]  # Pixel size from the affine's x scale
            contours_geo = [
                np.column_stack(ds_affine * (i[:, 1], i[:, 0])) +
                np.array([0.5 * ps, -0.5 * ps]) for i in find_contours(
                    ds_array.isel({dim: z_value}), z_values[0])
            ]

            # For each array of coordinates, drop any xy points that have NA
            contours_nona = [i[~np.isnan(i).any(axis=1)] for i in contours_geo]

            # Drop contours with fewer than `min_vertices` points and add list of contour arrays to dict
            contours_withdata = [
                i for i in contours_nona if len(i) >= min_vertices
            ]

            # If there is data for the contour, add to dict:
            if len(contours_withdata) > 0:
                contours_dict[z_value] = contours_withdata

            else:
                if verbose:
                    print(f'    No data for contour {z_value}; skipping')
                contours_dict[z_value] = None

    #######################
    # Export to shapefile #
    #######################

    # If a shapefile path is given, generate shapefile
    if output_shp:

        print(f'Exporting contour shapefile to {output_shp}')

        # Set up output multiline shapefile properties
        schema = {
            'geometry': 'MultiLineString',
            'properties': attribute_dtypes
        }

        # Create output shapefile for writing
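        # NOTE (illustrative): the crs={'init': ...} dict below is the legacy
        # fiona/pyproj form; recent fiona versions also accept a CRS string
        # such as 'EPSG:3577' or a pyproj CRS object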
        with fiona.open(output_shp,
                        'w',
                        crs={
                            'init': str(ds_crs),
                            'no_defs': True
                        },
                        driver='ESRI Shapefile',
                        schema=schema) as output:

            # Write each shapefile to the dataset one by one
            for i, (z_value, contours) in enumerate(contours_dict.items()):

                if contours:

                    # Create multi-string object from all contour coordinates
                    contour_multilinestring = MultiLineString(contours)

                    # Get attribute values for writing
                    attribute_vals = {
                        field_name: field_vals[i]
                        for field_name, field_vals in attribute_data.items()
                    }

                    # Write output shapefile to file with z-value field
                    output.write({
                        'properties': attribute_vals,
                        'geometry': mapping(contour_multilinestring)
                    })

    # Read the exported shapefile back in and return it as a geodataframe
    output_gdf = gpd.read_file(output_shp)
    return output_gdf
Example No. 39
0
def PolygoniseRasterMerge(DataDirectory,
                          RasterFile,
                          OutputShapefile='polygons'):
    """
    This function takes in a raster and converts it to a polygon shapefile using rasterio
    from https://gis.stackexchange.com/questions/187877/how-to-polygonize-raster-to-shapely-polygons/187883#187883?newreg=8b1f507529724a8488ce4789ba787363

    This version recognises where there are multiple polygons with the same key and merges
    them to a MultiPolygon using unary_union (formerly cascaded_union)

    Args:
        DataDirectory (str): the data directory with the basin raster
        RasterFile (str): the name of the raster
        OutputShapefile (str): the name of the output shapefile WITHOUT EXTENSION. Default = 'polygons'

    Returns:
        Dictionary where key is the raster value and the value is a shapely polygon

    Author: FJC
    """
    # import modules
    import rasterio
    from rasterio.features import shapes
    from shapely.geometry import shape, Polygon, mapping
    from shapely.ops import unary_union  # cascaded_union was removed in Shapely 2.0
    import fiona

    # define the mask
    #mask = None
    raster_band = 1

    # get raster no data value
    NDV = getNoDataValue(DataDirectory + RasterFile)

    # load in the raster using rasterio
    with rasterio.open(DataDirectory + RasterFile) as src:
        image = src.read(raster_band, masked=False)

        msk = src.read_masks(1)

        results = (
            {'properties': {'raster_val': v}, 'geometry': s}
            for s, v in shapes(image, mask=msk, transform=src.transform)
        )

    # define shapefile attributes
    # crs = src.crs.wkt
    # print (crs)
    crs = GetUTMEPSG(DataDirectory + RasterFile)
    schema = {'geometry': 'Polygon', 'properties': {'ID': 'float'}}

    # transform results into shapely geometries and write to shapefile using fiona
    geoms = list(results)
    PolygonDict = {}
    with fiona.open(DataDirectory + OutputShapefile,
                    'w',
                    crs=crs,
                    driver='ESRI Shapefile',
                    schema=schema) as output:
        for f in geoms:
            this_shape = Polygon(shape(f['geometry']))
            this_val = float(f['properties']['raster_val'])
            if this_val in PolygonDict:
                Polygons = [this_shape, PolygonDict[this_val]]
                this_shape = unary_union(Polygons)
            if this_val != NDV:  # remove no data values
                output.write({
                    'geometry': mapping(this_shape),
                    'properties': {
                        'ID': this_val
                    }
                })

            PolygonDict[this_val] = this_shape

    return PolygonDict
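# Illustrative usage of PolygoniseRasterMerge (a hedged sketch; the paths are
# hypothetical and the helpers getNoDataValue/GetUTMEPSG are assumed to exist
# in the surrounding module):
#
#   polygon_dict = PolygoniseRasterMerge('/data/basins/', 'basins.tif',
#                                        OutputShapefile='basin_polygons')
#   for raster_val, poly in polygon_dict.items():
#       print(raster_val, poly.geom_type)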
Example No. 40
0
    def iterfeatures(self, na="null", show_bbox=False):
        """
        Returns an iterator that yields feature dictionaries that comply with
        __geo_interface__

        Parameters
        ----------
        na : {'null', 'drop', 'keep'}, default 'null'
            Indicates how to output missing (NaN) values in the GeoDataFrame
            * null: output the missing entries as JSON null
            * drop: remove the property from the feature. This applies to
                    each feature individually so that features may have
                    different properties
            * keep: output the missing entries as NaN

        show_bbox : include bbox (bounds) in the geojson. default False
        """
        if na not in ["null", "drop", "keep"]:
            raise ValueError("Unknown na method {0}".format(na))

        if self._geometry_column_name not in self:
            raise AttributeError("No geometry data set (expected in"
                                 " column '%s')." % self._geometry_column_name)

        ids = np.array(self.index, copy=False)
        geometries = np.array(self[self._geometry_column_name], copy=False)

        properties_cols = self.columns.difference([self._geometry_column_name])

        if len(properties_cols) > 0:
            # convert to object to get python scalars.
            properties = self[properties_cols].astype(object).values
            if na == "null":
                properties[pd.isnull(self[properties_cols]).values] = None

            for i, row in enumerate(properties):
                geom = geometries[i]

                if na == "drop":
                    properties_items = {
                        k: v
                        for k, v in zip(properties_cols, row)
                        if not pd.isnull(v)
                    }
                else:
                    properties_items = {
                        k: v
                        for k, v in zip(properties_cols, row)
                    }

                feature = {
                    "id": str(ids[i]),
                    "type": "Feature",
                    "properties": properties_items,
                    "geometry": mapping(geom) if geom else None,
                }

                if show_bbox:
                    feature["bbox"] = geom.bounds if geom else None
                yield feature

        else:
            for fid, geom in zip(ids, geometries):
                feature = {
                    "id": str(fid),
                    "type": "Feature",
                    "properties": {},
                    "geometry": mapping(geom) if geom else None,
                }
                if show_bbox:
                    feature["bbox"] = geom.bounds if geom else None
                yield feature
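# Illustrative usage of iterfeatures (a hedged sketch; assumes `gdf` is a
# GeoDataFrame with a NaN in one of its property columns):
#
#   feats = list(gdf.iterfeatures(na='drop'))   # NaN keys omitted per feature
#   feats = list(gdf.iterfeatures(na='null'))   # NaN written as JSON null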
Example No. 41
0
def segment_buildings(config_path,
                      model_weight_path,
                      bounding_box,
                      mapbox_api_key,
                      parcel_polygon=None,
                      cpu=True,
                      building_size_min=30.0,
                      fire_dist=5.0):
    """
       Inpout :
       model_weight_path (string) : Contains the path towards the file containing the weights of the segmentation NN
       bounding_box ((float, float), (float, float)): contains the (lat, long) coordinates of the region of interest, the first one determines the top left corner, the second one determines the bottom right corner
       mapbox_api_key (string): The mapbox_api_key linked with the mapbox account, to be able to make requests
       parcel_polygon list[(float, float)]: Contains a list of latitude/longitude coordinates of the parcel of interest
       building_size_min (float) : minimum area in squared meter of a building in order to ba taken into account
       fire_dist (float) : maximum distance to consider a pair of buildings to be linked
       
       Output :
       image (bytes array): Satellite image delimited by the bounding box
       pred (bytes array): Pixelwise binary prediction (building or not building) on the image
       buildings (list[dict]): Contains various information on each building, the xy and latitude/longitude coordinates of the contour of the building, the area and the connected component the building is in
       dists (numpy array): Contains the distance between each pair of buildings
       nb_comp (int): contains the number of connected component (buildings are linked if they are less than 5 meters apart)
       parcel_polygon_xy : the pixel coordinates of the parcel
    """
    with open(config_path) as f:
        config = json.load(f)
    polygonizer = Polygonizer(config['polygonize_params'])
    if cpu:
        config['device'] = 'cpu'
        config['polygonize_params']['acm_method']['device'] = 'cpu'
    config['polygonize_params']['method'] = 'acm'

    backbone = UNetResNetBackbone(101)
    model = FrameFieldModel(config, backbone)
    if cpu:
        model.load_state_dict(
            torch.load(model_weight_path, map_location=torch.device('cpu')))
    else:
        model.load_state_dict(torch.load(model_weight_path))
        model.cuda()
    top_left = bounding_box[0]
    bottom_right = bounding_box[1]

    latitude_magnitude = np.abs(top_left[0] / 2 + bottom_right[0] / 2)
    if latitude_magnitude <= 30.0:
        z = 18
    elif latitude_magnitude <= 50.0:
        z = 17
    elif latitude_magnitude <= 70.0:
        z = 16
    else:
        z = 15
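    # Heuristic zoom choice: a web-mercator tile's ground extent scales with
    # cos(latitude) / 2**z, so using a lower zoom at higher latitudes keeps
    # the per-request ground coverage in a similar range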

    top_left_tile = mercantile.tile(top_left[1], top_left[0], z)
    bottom_right_tile = mercantile.tile(bottom_right[1], bottom_right[0], z)
    x_tile_range = [top_left_tile.x, bottom_right_tile.x]
    y_tile_range = [top_left_tile.y, bottom_right_tile.y]

    big_image = np.zeros(((y_tile_range[1] - y_tile_range[0] + 1) * 512, \
                          (x_tile_range[1] - x_tile_range[0] + 1) * 512, 3))

    westernmost = 200.0
    northernmost = -200.0
    southernmost = 200.0
    easternmost = -200.0

    for i, x in enumerate(range(x_tile_range[0], x_tile_range[1] + 1)):
        for j, y in enumerate(range(y_tile_range[0], y_tile_range[1] + 1)):
            west, south, east, north = mercantile.bounds(x, y, z)
            if west <= westernmost:
                westernmost = west
            if south <= southernmost:
                southernmost = south
            if east >= easternmost:
                easternmost = east
            if north >= northernmost:
                northernmost = north

            url = 'https://api.mapbox.com/v4/mapbox.satellite/' + str(
                z) + '/' + str(x) + '/' + str(
                    y) + '@2x.pngraw?access_token=' + mapbox_api_key
            r = requests.get(url, stream=True, verify=True)
            temp_im = np.array(Image.open(io.BytesIO(r.content)))[:, :, :3]
            big_image[j * 512:(j + 1) * 512,
                      i * 512:(i + 1) * 512, :] = temp_im

    model.eval()
    img = process_image(big_image, cpu)
    img = {'image': img.unsqueeze(0)}
    with torch.no_grad():
        big_pred = model(img)[0]

    eps = 0.025
    relative_pos_tl = [max((northernmost - top_left[0]) / (northernmost - southernmost) - eps, 0), \
                       max((top_left[1] - westernmost) / (easternmost - westernmost) - eps, 0)]

    relative_pos_br = [min((northernmost - bottom_right[0]) / (northernmost - southernmost) + eps, 1), \
                       min((bottom_right[1] - westernmost) / (easternmost - westernmost) + eps, 1)]

    pos_tl = [int(np.round(relative_pos_tl[0] * big_image.shape[0])), \
              int(np.round(relative_pos_tl[1] * big_image.shape[1]))]

    pos_br = [int(np.round(relative_pos_br[0] * big_image.shape[0])), \
              int(np.round(relative_pos_br[1] * big_image.shape[1]))]

    image = big_image[pos_tl[0]:pos_br[0],
                      pos_tl[1]:pos_br[1], :].astype(np.uint8)

    parcel_polygon_xy = []
    if parcel_polygon is not None:
        for coord in parcel_polygon:
            relative_coord = [(northernmost - coord[0]) / (northernmost - southernmost), \
                              (coord[1] - westernmost) / (easternmost - westernmost)]
            xy_coord = [int(np.round(relative_coord[1] * big_image.shape[1]) - pos_tl[1]), \
                        int(np.round(relative_coord[0] * big_image.shape[0]) - pos_tl[0])]
            parcel_polygon_xy.append(xy_coord)
        parcel_polygon_xy.append(parcel_polygon_xy[0])

    starttime = timeit.default_timer()
    big_contours = polygonizer(config['polygonize_params'],
                               big_pred['seg'].detach(),
                               big_pred['crossfield'].detach())
    big_contours = big_contours[0][0]['tol_0.125']

    buildings = []
    for contour in big_contours:
        keep = True
        info = {}
        contour_array = np.array(np.round(
            contour.exterior.xy).astype(int)).transpose()
        contour_xy = []
        contour_lat_long = []

        for coord in contour_array:
            if not (coord[1] >= pos_tl[0] and coord[1] < pos_br[0]
                    and coord[0] >= pos_tl[1] and coord[0] < pos_br[1]):
                keep = False
                break
            else:
                contour_xy.append(
                    [int(coord[0] - pos_tl[1]),
                     int(coord[1] - pos_tl[0])])
                long = westernmost + coord[0] * (
                    easternmost - westernmost) / big_image.shape[1]
                lat = northernmost - coord[1] * (
                    northernmost - southernmost) / big_image.shape[0]
                contour_lat_long.append([lat, long])
        if keep:
            contour_xy.append(contour_xy[0])
            contour_lat_long.append(contour_lat_long[0])
            info['xy'] = contour_xy
            info['lat_long'] = contour_lat_long
            if len(contour_lat_long) <= 3:
                info['area'] = 0.0
            else:
                info['area'] = get_area(contour_lat_long)
            buildings.append(info)

    if parcel_polygon is not None:
        for building in buildings:
            parcelle_mask = np.zeros(image.shape)
            building_mask = np.zeros(image.shape)
            cv2.fillPoly(parcelle_mask, [np.array(parcel_polygon_xy)], 1)
            cv2.fillPoly(building_mask, [np.array(building['xy'])], 1)
            info = 2 * building_mask + parcelle_mask
            inside_parcelle_ratio = (info == 3.0).sum() / (
                (info == 3.0).sum() + (info == 2.0).sum())
            building['parcelle_ratio'] = inside_parcelle_ratio

    if parcel_polygon is not None:
        buildings = [
            building for building in buildings
            if building['parcelle_ratio'] == 1.0
        ]
        for building in buildings:
            del building['parcelle_ratio']

    buildings = list(
        filter(lambda k: k['area'] >= building_size_min, buildings))
    buildings = sorted(buildings, key=lambda k: -k['area'])
    for i, building in enumerate(buildings, start=1):
        building['index'] = i

    dists = get_dist(buildings, image, big_image, westernmost, northernmost,
                     southernmost, easternmost, pos_tl)
    adjacency = np.where(dists < fire_dist, 1, 0) - np.eye(len(dists))
    nb_comp, comps = scipy.sparse.csgraph.connected_components(adjacency)

    for i, comp in enumerate(comps):
        buildings[i]['comp'] = int(comp)

    success, encoded_image = cv2.imencode('.png', image)
    image = encoded_image.tobytes()

    for building in buildings:
        building['xy'] = dict(
            geometry.mapping(geometry.Polygon(building['xy'])))
        building['lat_long'] = dict(
            geometry.mapping(geometry.Polygon(building['lat_long'])))

    buildings = geojson.dumps(buildings)
    buildings = geojson.loads(buildings)

    return image, buildings, dists, nb_comp, parcel_polygon_xy
Example No. 42
0
def directions(self):
    #print(self.data["origin"])
    origin = self.data['origin']
    destination = self.data['destination']
    #origin = {}
    #destination = {}
    headers = {
        'Authorization':
        '5b3ce3597851110001cf6248013a1de706624b69a83c5b9a2dd28edf',
        'Content-Type': 'application/json'
    }
    routes = {
        'fastest': {
            'route': 'fastest',
            'request': {
                'coordinates': [
                    origin['geometry']['coordinates'],
                    destination['geometry']['coordinates']
                ],
                'preference':
                'fastest'
            },
            'response': {}
        },
        'shortest': {
            'route': 'shortest',
            'request': {
                'coordinates': [
                    origin['geometry']['coordinates'],
                    destination['geometry']['coordinates']
                ],
                'preference':
                'shortest'
            },
            'response': {}
        },
        'recommended': {
            'route': 'recommended',
            'request': {
                'coordinates': [
                    origin['geometry']['coordinates'],
                    destination['geometry']['coordinates']
                ],
                'preference':
                'recommended'
            },
            'response': {}
        },
        'best': {
            'route': 'best',
            'request': {
                'coordinates': [
                    origin['geometry']['coordinates'],
                    destination['geometry']['coordinates']
                ],
                'preference':
                'recommended'
            },
            'response': {}
        },
    }
    directionsUrl = 'https://api.openrouteservice.org/v2/directions/cycling-regular/geojson'
    '''
    'options': {
        'avoid_features': 'unpavedroads',
        'profile_params': {
            'weightings' : {
                'green': '1',
                'surface_type': 'cobblestone:flattened',
                'smoothness_type': 'good',
            },
            'restrictions' : {

            },  
        },
        "avoid_polygons": {
            "type": "Polygon",
            "coordinates": [
                [
                    [100.0, 0.0],
                    [101.0, 0.0],
                    [101.0, 1.0],
                    [100.0, 1.0],
                    [100.0, 0.0]
                ]
            ]
        }

    }
    '''

    response = requests.post(directionsUrl,
                             data=json.dumps(routes['fastest']['request']),
                             headers=headers)
    routes['fastest']['response'] = response.json()

    response = requests.post(directionsUrl,
                             data=json.dumps(routes['shortest']['request']),
                             headers=headers)
    routes['shortest']['response'] = response.json()

    response = requests.post(directionsUrl,
                             data=json.dumps(routes['recommended']['request']),
                             headers=headers)
    routes['recommended']['response'] = response.json()

    routes['best']['response'] = routes['recommended']['response'].copy()
    routes['best']['response']['metadata']['query']['preference'] = 'best'

    # Draw box around route
    minx = min(origin['geometry']['coordinates'][0],
               destination['geometry']['coordinates'][0])
    miny = min(origin['geometry']['coordinates'][1],
               destination['geometry']['coordinates'][1])
    maxx = max(origin['geometry']['coordinates'][0],
               destination['geometry']['coordinates'][0])
    maxy = max(origin['geometry']['coordinates'][1],
               destination['geometry']['coordinates'][1])
    # routes['best']['response']['features'][0]['geometry']['coordinates']
    for waypoint in routes['best']['response']['features'][0]['geometry'][
            'coordinates']:
        if (waypoint[0] < minx):
            minx = waypoint[0]
        if (waypoint[1] < miny):
            miny = waypoint[1]
        if (waypoint[0] > maxx):
            maxx = waypoint[0]
        if (waypoint[1] > maxy):
            maxy = waypoint[1]
    #bounding_box = geometry.box(minx, miny, maxx, maxy)
    #print(bounding_box)

    # Build buffer around box to include nearby segments that aren't directly in between

    buffer = 1 / 5
    bufferx = (maxx - minx) * buffer
    buffery = (maxy - miny) * buffer
    #print(buffery)

    bounding_box = geometry.box(minx - bufferx, miny - buffery, maxx + bufferx,
                                maxy + buffery)
    segments = [[13.3571718, 52.5146251], [13.44374688, 52.4944241],
                [13.3689609, 52.5339110]]
    segmentTiergarten = [13.3571718, 52.5146251]
    segmentGoerli = [13.44374688, 52.4944241]
    #segment = [13.4501013, 52.4965252]
    #print(bounding_box.intersects(geometry.Point(segmentGoerli)))
    #print(bounding_box)
    routes['bounding_box'] = {
        'type':
        'FeatureCollection',
        'bbox':
        [minx - bufferx, miny - buffery, maxx + bufferx, maxy + buffery],
        'features': [{
            'bbox':
            [minx - bufferx, miny - buffery, maxx + bufferx, maxy + buffery],
            'type':
            'Feature',
            'geometry':
            geometry.mapping(bounding_box)
        }]
    }

    routes['isochrones'] = {
        'type': 'FeatureCollection',
        'bbox':
        [minx - bufferx, miny - buffery, maxx + bufferx, maxy + buffery],
        'features': []
    }
    #routes['invalid_isochrones'] = []
    isochronesUrl = 'https://api.openrouteservice.org/v2/isochrones/cycling-regular'

    isochrone = {}
    '''To Do'''
    ### Save Isochrone in DB
    ### Query Isochrones (in bbox?) from DB
    ### Iterate over desired segments
    '''For non node segments'''
    #object.representative_point()
    #Returns a cheaply computed point that is guaranteed to be within the geometric object.

    # store all segments to route through in list
    segments_route = []

    for segment in segments:
        if (bounding_box.intersects(geometry.Point(segment))):
            isochronesData = {
                "locations": [segment],
                # range in seconds
                "range": [360]
            }
            response = requests.post(isochronesUrl,
                                     data=json.dumps(isochronesData),
                                     headers=headers)
            isochrone = geometry.Polygon(
                response.json()['features'][0]['geometry']['coordinates'][0])
            #coordinates = geometry.Polygon(isochrone)

            #print(coordinates)
            #waypoint = {}

            for waypoint in routes['best']['response']['features'][0][
                    'geometry']['coordinates']:
                if (isochrone.intersects(geometry.Point(waypoint))):
                    print(segment,
                          isochrone.intersects(geometry.Point(waypoint)))
                    # send isochrones in response for algorithm visualisation
                    newIsochrone = {
                        'properties': {
                            'osm_id': 0,
                        },
                        'bbox': isochrone.bounds,
                        'type': 'Feature',
                        'geometry': geometry.mapping(isochrone)
                    }
                    routes['isochrones']['features'].append(newIsochrone)
                    segments_route.append(segment)
                    # reroute recommended as best through segment
                    routes['best']['request']['coordinates'] = [
                        origin['geometry']['coordinates']
                    ] + segments_route + [
                        destination['geometry']['coordinates']
                    ]

                    #print(routes['best']['request']['coordinates'])
                    response = requests.post(directionsUrl,
                                             data=json.dumps(
                                                 routes['best']['request']),
                                             headers=headers)
                    routes['best']['response'] = response.json()
                    routes['best']['response']['metadata']['query'][
                        'preference'] = 'best'
                    print('Optimized Route')
                    break

        # If routes.best goes close by park or other desirable segment, add waypoint and send again
        # Remember first distance and time of recommended so we don't add too much

    return JsonResponse(routes)
Example No. 43
0
parser.add_argument('in_filespec', help='source file path')
parser.add_argument('city_name', help='The name of the city')
parser.add_argument(
    '-o',
    '--outpath',
    help='destination directory path, defaults to mapping/json/',
    default='mapping/json/')
args = parser.parse_args()
in_filespec = args.in_filespec
outpath = args.outpath
city_name = args.city_name

if not os.path.exists(outpath):
    os.makedirs(outpath)

with fiona.open(in_filespec) as fiona_collection:

    shapefile_record = next(fiona_collection)

    # Use Shapely to create the polygon
    shape = asShape(shapefile_record['geometry'])
    shape_dict = mapping(shape)
    shape_dict['city_name'] = city_name
    out_filespec = "{}/{}.json".format(outpath,
                                       city_name.lower().replace(" ", "_"))
    j = json.dumps(shape_dict)  # convert it to a string
    with open(out_filespec, mode='w') as f:
        f.write(j)
    print(j)
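# Illustrative invocation (the script name is hypothetical):
#   python shape_to_json.py city_boundary.shp "San Francisco" -o mapping/json/
# This writes mapping/json/san_francisco.json containing the geometry mapping
# plus a 'city_name' key.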
Example No. 44
0
def createPoints(inshp, outshp, mini_dist):
    
    '''
    This function will parse through the street network of the provided city,
    clean all highways, and create points every mini_dist meters (or as specified) along
    the linestrings
    Required modules: Fiona and Shapely

    parameters:
        inshp: the input linear shapefile, must be in WGS84 projection, EPSG:4326
        outshp: the result point feature class
        mini_dist: the minimum distance between two created points

    last modified by Xiaojiang Li, MIT Senseable City Lab

    '''
    
    import fiona
    import os,os.path
    from shapely.geometry import shape,mapping
    from shapely.ops import transform
    from functools import partial
    import pyproj
    from fiona.crs import from_epsg
    
    
    count = 0
    s = {'trunk_link','tertiary','motorway','motorway_link','steps', None, ' ','pedestrian','primary', 'primary_link','footway','tertiary_link', 'trunk','secondary','secondary_link','tertiary_link','bridleway','service'}
    
    # the temporary file of the cleaned data
    root = os.path.dirname(inshp)
    basename = 'clean_' + os.path.basename(inshp)
    temp_cleanedStreetmap = os.path.join(root,basename)
    
    # if the tempfile exist then delete it
    if os.path.exists(temp_cleanedStreetmap):
        fiona.remove(temp_cleanedStreetmap, 'ESRI Shapefile')
    """
    # remove this block comment if you are using maps from open street data. If you have cleaned your dataset on your own do not remove this
    # clean the original street maps by removing highways, if it the street map not from Open street data, users'd better to clean the data themselve
    with fiona.open(inshp) as source, fiona.open(temp_cleanedStreetmap, 'w', driver=source.driver, crs=source.crs,schema=source.schema) as dest:
        
        for feat in source:
            try:
                i = feat['properties']['highway'] # for the OSM street data
                if i in s:
                    continue
            except:
                # if the street map is not osm, do nothing. You'd better to clean the street map, if you don't want to map the GVI for highways
                key = list(dest.schema['properties'].keys())[0] # get the field of the input shapefile and duplicate the input feature
                i = feat['properties'][key]
                if i in s:
                    continue
            
            dest.write(feat)
    """
    schema = {
        'geometry': 'Point',
        'properties': {'id': 'int'},
    }

    # Create points along the streets
    with fiona.Env():  # fiona.drivers() is deprecated in favour of fiona.Env()
        #with fiona.open(outshp, 'w', 'ESRI Shapefile', crs=source.crs, schema) as output:
        with fiona.open(outshp, 'w', crs = from_epsg(4326), driver = 'ESRI Shapefile', schema = schema) as output:
            # for line in fiona.open(temp_cleanedStreetmap):   ## use this instead of the next line if you are working with open street data 
            for line in fiona.open(inshp):
                first = shape(line['geometry'])
                length = first.length
                
                try:
                    # convert degrees to meters, in order to split by distance in meters
                    project = partial(pyproj.transform, pyproj.Proj(init='EPSG:4326'), pyproj.Proj(init='EPSG:3857')) # 3857 is pseudo-Mercator; the unit is the meter
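                    # NOTE (illustrative): partial(pyproj.transform, ...) is deprecated in
                    # pyproj>=2; an equivalent modern form would be:
                    #   project = pyproj.Transformer.from_crs('EPSG:4326', 'EPSG:3857', always_xy=True).transform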
                    line2 = transform(project, first)
                    #linestr = list(line2.coords) # commented the line since it is not being used elsewhere and throwing errors in some cases
                    dist = mini_dist #set
                    for distance in range(0,int(line2.length), dist):
                        point = line2.interpolate(distance)
                        # convert the local projection back to WGS84 and write to the output shp
                        project2 = partial(pyproj.transform,pyproj.Proj(init='EPSG:3857'),pyproj.Proj(init='EPSG:4326'))
                        point = transform(project2, point)
                        output.write({'geometry':mapping(point),'properties': {'id':1}})
                except Exception as e:
                    # print(str(e))  # uncomment if you want to debug
                    print("You should make sure the input shapefile is WGS84")
                    return
                    
    print("Process Complete")
    
    # delete the temporary cleaned shapefile
    fiona.remove(temp_cleanedStreetmap, 'ESRI Shapefile')
Example No. 45
0
def _force_polygon_ccw(geometry):
    polygon = shape(geometry)
    return mapping(orient(polygon))
Example No. 46
0
def get_sub_image(idx, selected_polygon, image_tile_list, image_tile_bounds,
                  save_path, dstnodata, brectangle):
    '''
    get a mask image based on a selected polygon; it may cross two image tiles
    :param idx: the polygon index (used in warning messages)
    :param selected_polygon: selected polygon
    :param image_tile_list: image list
    :param image_tile_bounds: the boxes of images in the list
    :param save_path: save path
    :param dstnodata: the nodata value for the output raster
    :param brectangle: if brectangle is True, crop the raster using bounds, else, use the polygon
    :return: True if successful, False otherwise
    '''

    # find the images which the center polygon overlap (one or two images)
    img_index = get_overlap_image_index([selected_polygon], image_tile_bounds)
    if len(img_index) < 1:
        basic.outputlogMessage(
            'Warning, %dth polygon does not overlap any image tile, please check '  #and its buffer area
            '(1) the shape file and raster have the same projection'
            ' and (2) this polygon is in the extent of images' % idx)
        return False

    image_list = [image_tile_list[item] for item in img_index]

    # check it cross two or more images
    if len(image_list) == 1:
        # for the case that the polygon only overlap one raster
        with rasterio.open(image_list[0]) as src:
            polygon_json = mapping(selected_polygon)

            # not necessary
            # overlap_win = rasterio.features.geometry_window(src, [polygon_json], pad_x=0, pad_y=0, north_up=True, rotated=False,
            #                               pixel_precision=3)

            if brectangle:
                # polygon_box = selected_polygon.bounds
                polygon_json = mapping(
                    selected_polygon.envelope
                )  #shapely.geometry.Polygon([polygon_box])

            # crop image and saved to disk
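            # all_touched=True keeps every pixel touched by the polygon outline
            # (not only pixels whose centres fall inside); crop=True trims the
            # output window to the polygon's bounds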
            out_image, out_transform = mask(src, [polygon_json],
                                            nodata=dstnodata,
                                            all_touched=True,
                                            crop=True)

            # test: save it to disk
            out_meta = src.meta.copy()
            out_meta.update(
                {
                    "driver": "GTiff",
                    "height": out_image.shape[1],
                    "width": out_image.shape[2],
                    "transform": out_transform,
                    "nodata": dstnodata
                }
            )  # note that the saved image has a small offset compared to the original ones (~0.5 pixel)
            with rasterio.open(save_path, "w", **out_meta) as dest:
                dest.write(out_image)
        pass
    else:
        # for the case it overlap more than one raster, need to produce a mosaic
        tmp_saved_files = []

        for k_img, image_path in enumerate(image_list):
            with rasterio.open(image_path) as src:
                polygon_json = mapping(selected_polygon)
                if brectangle:
                    # polygon_box = selected_polygon.bounds
                    polygon_json = mapping(
                        selected_polygon.envelope
                    )  # shapely.geometry.Polygon([polygon_box])

                # crop image and saved to disk
                out_image, out_transform = mask(src, [polygon_json],
                                                nodata=dstnodata,
                                                all_touched=True,
                                                crop=True)

                tmp_saved = os.path.splitext(save_path)[
                    0] + '_%d' % k_img + os.path.splitext(save_path)[1]
                # test: save it to disk
                out_meta = src.meta.copy()
                out_meta.update(
                    {
                        "driver": "GTiff",
                        "height": out_image.shape[1],
                        "width": out_image.shape[2],
                        "transform": out_transform,
                        "nodata": dstnodata
                    }
                )  # note that the saved image has a small offset compared to the original ones (~0.5 pixel)
                with rasterio.open(tmp_saved, "w", **out_meta) as dest:
                    dest.write(out_image)
                tmp_saved_files.append(tmp_saved)

        # mosaic files in tmp_saved_files
        mosaic_args_list = [
            'gdal_merge.py', '-o', save_path, '-n',
            str(dstnodata), '-a_nodata',
            str(dstnodata)
        ]
        mosaic_args_list.extend(tmp_saved_files)
        if basic.exec_command_args_list_one_file(mosaic_args_list,
                                                 save_path) is False:
            raise IOError('error, obtain a mosaic (%s) failed' % save_path)

        # # for test
        # if idx==13:
        #     raise ValueError('for test')

        # remove the tmp files
        for tmp_file in tmp_saved_files:
            io_function.delete_file_or_dir(tmp_file)

    # if it would output a very large image (10000 by 10000 pixels), then raise an error

    return True
Example No. 47
0
def trip_to_geojson(feed: "Feed",
                    trip_id: str,
                    *,
                    include_stops: bool = False) -> Dict:
    """
    Return a GeoJSON representation of the given trip, optionally with
    its stops.

    Parameters
    ----------
    feed : Feed
    trip_id : string
        ID of trip in ``feed.trips``
    include_stops : boolean

    Returns
    -------
    dictionary
        A (decoded) GeoJSON FeatureCollection comprising a LineString
        feature representing the trip's shape.
        If ``include_stops``, then also include one Point feature for
        each stop visited by the trip.
        The LineString feature will contain as properties all the
        columns in ``feed.trips`` pertaining to the given trip,
        and each Point feature will contain as properties all the
        columns in ``feed.stops`` pertaining to the stop,
        except the ``stop_lat`` and ``stop_lon`` properties.

        Return the empty dictionary if the trip has no shape.

    """
    # Get the relevant shapes
    t = feed.trips.copy()
    t = t[t["trip_id"] == trip_id].copy()
    shid = t["shape_id"].iat[0]
    geometry_by_shape = feed.build_geometry_by_shape(use_utm=False,
                                                     shape_ids=[shid])

    if not geometry_by_shape:
        return {}

    features = [{
        "type": "Feature",
        "properties": json.loads(t.to_json(orient="records"))[0],
        "geometry": sg.mapping(sg.LineString(geometry_by_shape[shid])),
    }]

    if include_stops:
        # Get relevant stops and geometries
        s = feed.get_stops(trip_id=trip_id)
        cols = set(s.columns) - set(["stop_lon", "stop_lat"])
        s = s[list(cols)].copy()
        stop_ids = s["stop_id"].tolist()
        geometry_by_stop = feed.build_geometry_by_stop(stop_ids=stop_ids)
        features.extend([{
            "type":
            "Feature",
            "properties":
            json.loads(
                s[s["stop_id"] == stop_id].to_json(orient="records"))[0],
            "geometry":
            sg.mapping(geometry_by_stop[stop_id]),
        } for stop_id in stop_ids])

    return {"type": "FeatureCollection", "features": features}
Example No. 48
0
def get_one_sub_image_label(idx, center_polygon, class_int, polygons_all,
                            class_int_all, bufferSize, img_tile_boxes,
                            image_tile_list):
    '''
    get a sub-image and the corresponding label raster
    :param idx: the polygon index
    :param center_polygon: the polygon in the training polygons
    :param class_int: the class number of this polygon
    :param polygons_all: the full set of training polygons, for generating label images
    :param class_int_all: the class number for the full set of training polygons
    :param bufferSize: the buffer area to generate sub-images
    :param img_tile_boxes: the bounding boxes of all the image tiles
    :param image_tile_list: the list of image paths
    :return:
    '''

    ############# This function is not working  #############

    # center_polygon corresponds to one polygon in the full set of training polygons, so it is not necessary to check
    # get adjacent polygon
    adj_polygons, adj_polygons_class = get_adjacent_polygons(
        center_polygon, polygons_all, class_int_all, bufferSize)

    # add the center polygons to adj_polygons
    adj_polygons.extend([center_polygon])
    adj_polygons_class.extend([class_int])
    basic.outputlogMessage('get a sub image covering %d training polygons' %
                           len(adj_polygons))

    # find the images which the center polygon overlap (one or two images)
    img_index = get_overlap_image_index(adj_polygons, img_tile_boxes)
    if len(img_index) < 1:
        basic.outputlogMessage(
            'Warning, %dth polygon and the adjacent ones do not overlap any image tile, please check '
            '(1) the shape file and raster have the same projection'
            'and (2) this polygon is in the extent of images' % idx)

    image_list = [image_tile_list[item] for item in img_index]

    # open the raster to get projection, resolution
    # with rasterio.open(image_list[0]) as src:
    #     resX = src.res[0]
    #     resY = src.res[1]
    #     src_profile = src.profile
    src = rasterio.open(image_list[0])
    resX = src.res[0]
    resY = src.res[1]
    src_profile = src.profile

    # rasterize the shapes
    burn_shapes = [
        (item_shape, item_class_int)
        for (item_shape,
             item_class_int) in zip(adj_polygons, adj_polygons_class)
    ]
    burn_boxes = get_bounds_of_polygons(adj_polygons)

    # check whether the extent is too large
    burn_boxes_width = math.ceil((burn_boxes[2] - burn_boxes[0]) / resX)
    burn_boxes_height = math.ceil((burn_boxes[3] - burn_boxes[1]) / resY)

    if burn_boxes_width * burn_boxes_height > 10000 * 10000:
        raise ValueError(
            'error, the polygons want to burn cover a very large area')

    # fill regions outside the shapes with 0 (255 could be used instead for test purposes)
    # setting all_touched=True may work better for small shapes
    # new_transform = (burn_boxes[0], resX, 0, burn_boxes[3], 0, -resY )  # (X_min, resX, 0, Y_max, 0, -resY)  # GDAL-style transforms, have been deprecated after raster 1.0
    # affine.Affine() vs. GDAL-style geotransforms: https://rasterio.readthedocs.io/en/stable/topics/migrating-to-v1.html
    new_transform = (resX, 0, burn_boxes[0], 0, -resY, burn_boxes[3]
                     )  # (resX, 0, X_min, 0, -resY, Y_max)
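    # An affine-style 6-tuple (a, b, c, d, e, f) maps (col, row) to
    # x = a*col + b*row + c and y = d*col + e*row + f; rasterio will
    # generally coerce such a tuple to an affine.Affine internally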
    out_label = rasterize(burn_shapes,
                          out_shape=(burn_boxes_height, burn_boxes_width),  # out_shape is (rows, cols)
                          transform=new_transform,
                          fill=0,
                          all_touched=False,
                          dtype=rasterio.uint8)
    print('new_transform', new_transform)
    print('out_label', out_label.shape)

    # test, save to disk
    kwargs = src.meta
    kwargs.update(dtype=rasterio.uint8,
                  count=1,
                  width=burn_boxes_width,
                  height=burn_boxes_height,
                  transform=new_transform)
    with rasterio.open('test_6_albers.tif', 'w', **kwargs) as dst:
        dst.write_band(1, out_label.astype(rasterio.uint8))

    # mask, get pixels cover by polygons, set all_touched as True
    polygons_json = [mapping(item) for item in adj_polygons]
    out_image, out_transform = mask(src,
                                    polygons_json,
                                    nodata=0,
                                    all_touched=True,
                                    crop=True)

    # test: output information
    print('out_transform', out_transform)
    print('out_image', out_image.shape)

    # test: save it to disk
    out_meta = src.meta.copy()
    out_meta.update(
        {
            "driver": "GTiff",
            "height": out_image.shape[1],
            "width": out_image.shape[2],
            "transform": out_transform
        }
    )  # note that, the saved image have a small offset compared to the original ones (~0.5 pixel)
    save_path = "masked_of_polygon_%d.tif" % (idx + 1)
    with rasterio.open(save_path, "w", **out_meta) as dest:
        dest.write(out_image)

    # return image_array, label_array
    return 1, 1
Example No. 49
0
    def _serialize(self, value, attr, obj):
        if value is None:
            return None
        return mapping(to_shape(value))
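# (mapping comes from shapely.geometry; to_shape is presumably
# geoalchemy2.shape.to_shape, which converts a database WKB element to a
# shapely geometry before it is serialised as a GeoJSON-like dict.)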
Example No. 50
0
def shapelyGeometryToGeoJSON(geometry):
    geoDict = mapping(geometry)
    geoString = dumps(geoDict)
    return geoString
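# ('dumps' above is assumed to be json.dumps, imported in the source module;
# it serialises the GeoJSON-like dict produced by shapely's mapping().)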
Example No. 51
0
    def __call__(self, value, system):
        """
        Implements a subclass of pyramid_oereb.lib.renderer.extract.json_.Renderer to create a print result
        out of a json. The json extract is reformatted to fit the structure of mapfish print.

        Args:
            value (tuple): A tuple containing the generated extract record and the params
                dictionary.
            system (dict): The available system properties.

        Returns:
            buffer: The pdf content as received from configured mapfish print instance url.
        """
        log.debug("Parameter webservice is {}".format(value[1]))

        if value[1].images:
            raise HTTPBadRequest('With image is not allowed in the print')

        self._request = self.get_request(system)

        # Create a lower case GET dict to be able to accept all cases of upper and lower case writing
        self._lowercase_GET_dict = dict((k.lower(), v.lower()) for k, v in self._request.GET.items())

        # If a language is specified in the request, use it. Otherwise, use the language from base class
        self._fallback_language = Config.get('default_language')
        if 'lang' in self._lowercase_GET_dict:
            self._language = self._lowercase_GET_dict.get('lang')

        # Based on extract record and webservice parameter, render the extract data as JSON
        extract_record = value[0]
        extract_as_dict = self._render(extract_record, value[1])
        feature_geometry = mapping(extract_record.real_estate.limit)
        pdf_to_join = set()

        if Config.get('print', {}).get('compute_toc_pages', False):
            extract_as_dict['nbTocPages'] = TocPages(extract_as_dict).getNbPages()
        else:
            extract_as_dict['nbTocPages'] = 1

        self.convert_to_printable_extract(extract_as_dict, feature_geometry, pdf_to_join)

        print_config = Config.get('print', {})

        extract_as_dict['Display_RealEstate_SubunitOfLandRegister'] = print_config.get(
            'display_real_estate_subunit_of_land_register', True
        )

        extract_as_dict['Display_Certification'] = print_config.get(
            'display_certification', True
        )

        spec = {
            'layout': Config.get('print', {})['template_name'],
            'outputFormat': 'pdf',
            'lang': self._language,
            'attributes': extract_as_dict,
        }

        response = self.get_response(system)

        if self._request.GET.get('getspec', 'no') != 'no':
            response.headers['Content-Type'] = 'application/json; charset=UTF-8'
            return json.dumps(spec, sort_keys=True, indent=4)
        pdf_url = urlparse.urljoin(Config.get('print', {})['base_url'] + '/', 'buildreport.pdf')
        pdf_headers = Config.get('print', {})['headers']
        print_result = requests.post(
            pdf_url,
            headers=pdf_headers,
            data=json.dumps(spec)
        )
        try:
            if Config.get('print', {}).get('compute_toc_pages', False):
                with io.BytesIO() as pdf:
                    pdf.write(print_result.content)
                    pdf_reader = PdfFileReader(pdf)
                    x = []
                    for i in range(len(pdf_reader.getOutlines())):
                        x.append(pdf_reader.getOutlines()[i]['/Page']['/StructParents'])
                    try:
                        true_nb_of_toc = min(x)-1
                    except ValueError:
                        true_nb_of_toc = 1

                    if true_nb_of_toc != extract_as_dict['nbTocPages']:
                        log.warning('nbTocPages in result pdf: {} is not equal to the predicted value: {}; requesting new pdf'.format(true_nb_of_toc, extract_as_dict['nbTocPages']))  # noqa
                        extract_as_dict['nbTocPages'] = true_nb_of_toc
                        print_result = requests.post(
                            pdf_url,
                            headers=pdf_headers,
                            data=json.dumps(spec)
                        )
        except PdfReadError as e:
            err_msg = 'a problem occurred while generating the pdf file'
            log.error(err_msg + ': ' + str(e))
            raise HTTPInternalServerError(err_msg)

        if not extract_as_dict['isReduced'] and print_result.status_code == 200:
            main = tempfile.NamedTemporaryFile(suffix='.pdf')
            main.write(print_result.content)
            main.flush()
            cmd = ['pdftk', main.name]
            temp_files = [main]
            for url in pdf_to_join:
                result = requests.get(url)
                content_type = result.headers.get('content-type')
                log.debug("document url: " + url + " => content_type: " + content_type)
                if content_type != 'application/pdf':
                    msg = "Skipped document inclusion (url: '{}') because content_type: '{}'"
                    log.warning(msg.format(url, content_type))
                    continue
                tmp_file = tempfile.NamedTemporaryFile(suffix='.pdf')
                tmp_file.write(result.content)
                tmp_file.flush()
                temp_files.append(tmp_file)
                cmd.append(tmp_file.name)
            out = tempfile.NamedTemporaryFile(suffix='.pdf')
            cmd += ['cat', 'output', out.name]
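            # The assembled command has the form
            #   pdftk main.pdf doc1.pdf ... cat output merged.pdf
            # i.e. it concatenates the extract with the downloaded documents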
            sys.stdout.flush()
            time.sleep(0.1)
            subprocess.check_call(cmd)
            content = out.file.read()
        else:
            content = print_result.content

        # Save printed file to the specified path.
        pdf_archive_path = print_config.get('pdf_archive_path', None)
        if pdf_archive_path is not None:
            self.archive_pdf_file(pdf_archive_path, content, extract_as_dict)

        response.status_code = print_result.status_code
        response.headers = print_result.headers
        if 'Transfer-Encoding' in response.headers:
            del response.headers['Transfer-Encoding']
        if 'Connection' in response.headers:
            del response.headers['Connection']
        return content
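For reference, the pdftk call above reduces to a standalone pattern. A minimal sketch, assuming the pdftk binary is on PATH (file names here are illustrative):

import subprocess

def merge_pdfs(pdf_paths, out_path):
    # pdftk in1.pdf in2.pdf ... cat output merged.pdf
    subprocess.check_call(['pdftk'] + list(pdf_paths) +
                          ['cat', 'output', out_path])

# merge_pdfs(['main.pdf', 'annex.pdf'], 'report.pdf')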
Example No. 52
schema = {
    'geometry': 'Polygon',
    'properties': {
        'x': 'int',
        'y': 'int',
        'z': 'int',
        # 'minx': 'float',
        # 'miny': 'float',
        # 'maxx': 'float',
        # 'maxy': 'float',
    }
}

for tz in range(minzoom, maxzoom + 1):
    with collection(tilefilename % tz, "w", "ESRI Shapefile",
                    schema) as output:
        print " * Processing Zoom Level %s ..." % (tz, )
        tminx, tminy = mercator.MetersToTile(bbox[0], bbox[1], tz)
        tmaxx, tmaxy = mercator.MetersToTile(bbox[2], bbox[3], tz)
        for ty in range(tminy, tmaxy + 1):
            for tx in range(tminx, tmaxx + 1):
                # Use top origin tile scheme (like OSM or GMaps)
                ymax = 1 << tz
                invert_ty = ymax - ty - 1
                tilebounds = mercator.TileBounds(tx, ty, tz)

                poly = box(*tilebounds)
                output.write({
                    'properties': {
                        'x': tx,
                        'y': invert_ty,
                        'z': tz,
                        # 'minx': float(tilebounds[0]),
                        # 'miny': float(tilebounds[1]),
                        # 'maxx': float(tilebounds[2]),
                        # 'maxy': float(tilebounds[3]),
                    },
                    'geometry': mapping(poly)
                })
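The y-inversion above converts a TMS row (origin bottom-left) to the top-origin scheme used by OSM and Google Maps. A minimal sketch of just that flip:

def flip_tile_row(ty, tz):
    # At zoom tz there are 2**tz rows, so the flip mirrors the row index.
    return (1 << tz) - ty - 1

assert flip_tile_row(0, 2) == 3                     # bottom row becomes top row
assert flip_tile_row(flip_tile_row(5, 4), 4) == 5   # the flip is its own inverse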
Example No. 53
    data = {
        'id': idx,
        'length': segment.length,
        'lines': len(segment) if is_intersection else 1,
        'pointx': segment.centroid.x,
        'pointy': segment.centroid.y,
        'records': len(records) if records else 0
    }
    for year_range in year_ranges:
        max_occurred, min_occurred, records_label = year_range
        if records:
            records_in_range = [
                record for record in records
                if min_occurred < record['occurred'] <= max_occurred
            ]
            data[records_label] = len(records_in_range)
        else:
            data[records_label] = 0
    segments_with_data.append((segment, data))

# Step 7: Output the result in shapefile format
with fiona.open("PATH TO OUTPUT FILE",
                'w',
                driver='ESRI Shapefile',
                schema=schema) as output:
    for segment, data in segments_with_data:
        if segment.geom_type == 'MultiLineString':
            output.write({'geometry': mapping(segment), 'properties': data})
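The half-open test min_occurred < occurred <= max_occurred assigns each record to exactly one bucket. A self-contained illustration with made-up year ranges and records:

year_ranges = [(2017, 2014, 'y15_17'), (2014, 2011, 'y12_14')]  # (max, min, label)
records = [{'occurred': 2013}, {'occurred': 2015}, {'occurred': 2017}]

counts = {}
for max_occurred, min_occurred, label in year_ranges:
    counts[label] = sum(
        1 for r in records if min_occurred < r['occurred'] <= max_occurred)

print(counts)  # {'y15_17': 2, 'y12_14': 1}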
Example No. 54
def elem2shp(
    elem_nodes,
    node_coords,
    elem_sub,
    lake_elems,
    shape_name,
    epsg=26910,
    verbose=False,
):
    ''' elem2shp() - Creates an IWFM element shapefile 

    TODO:
      - change from fiona to pyshp and wkt format

    Parameters
    ----------
    elem_nodes : list
        list of elements and associated nodes
    
    node_coords : list
        list of nodes and associated X and Y coordinates
    
    elem_sub : list
        list of elements and associated subregions
    
    lake_elems : list
        list of lakes and associated elements
    
    shape_name : str
         output shapefiles base name
    
    epsg : int default=26910 (NAD 83 UTM 10, CA)
        EPSG projection
    
    verbose : bool, default=False
        True = command-line output on

    Returns
    -------
    nothing

    '''
    import fiona
    import fiona.crs
    import shapefile as shp  # pyshp
    from shapely.geometry import mapping, Polygon

    import iwfm

    elem_shapename = f'{shape_name}_Elements.shp'

    # Create list of element polygons
    polygons = iwfm.elem_poly_coords(elem_nodes, node_coords)

    # Define the polygon feature geometry
    elem_schema = {
        'geometry': 'Polygon',
        'properties': {
            'elem_id': 'int',
            'subregion': 'int',
            'lake_no': 'int'
        },
    }

    # Write a new element shapefile
    with fiona.open(
            elem_shapename,
            'w',
            crs=fiona.crs.from_epsg(epsg),
            driver='ESRI Shapefile',
            schema=elem_schema,
    ) as out:
        for i in range(len(polygons)):
            poly = Polygon(polygons[i])
            lake_no = 0
            if len(lake_elems) > 0:
                for j in range(len(lake_elems)):
                    if lake_elems[j][1] == i + 1:  # lake on this element
                        lake_no = lake_elems[j][0]
            out.write({
                'geometry': mapping(poly),
                'properties': {
                    'elem_id': i + 1,
                    'subregion': elem_sub[i],
                    'lake_no': lake_no,
                },
            })
    if verbose:
        print(f'  Wrote shapefile {elem_shapename}')
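A hypothetical usage sketch with tiny synthetic inputs; the row layouts of elem_nodes, node_coords and lake_elems below are assumptions inferred from how the function indexes them, not taken from IWFM documentation:

# one square element built from nodes 1-4
elem_nodes = [[1, 1, 2, 3, 4]]
node_coords = [[1, 0.0, 0.0], [2, 100.0, 0.0],
               [3, 100.0, 100.0], [4, 0.0, 100.0]]
elem_sub = [1]          # element 1 belongs to subregion 1
lake_elems = [[1, 1]]   # lake 1 sits on element 1

elem2shp(elem_nodes, node_coords, elem_sub, lake_elems,
         shape_name='my_model', verbose=True)
# -> writes my_model_Elements.shp in EPSG:26910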
Example No. 55
def create_item(hdf_path, additional_providers=None):
    file_name = os.path.basename(hdf_path)
    scene_id = AsterSceneId.from_path(file_name)

    with rio.open(hdf_path) as f:
        tags = f.tags()

    xmin, xmax = float(tags.pop('WESTBOUNDINGCOORDINATE')), float(
        tags.pop('EASTBOUNDINGCOORDINATE'))
    ymin, ymax = float(tags.pop('SOUTHBOUNDINGCOORDINATE')), float(
        tags.pop('NORTHBOUNDINGCOORDINATE'))
    geom = mapping(box(xmin, ymin, xmax, ymax))
    bounds = shape(geom).bounds

    dt = str_to_datetime(tags.pop('SETTINGTIMEOFPOINTING.1'))

    item = pystac.Item(
        id=scene_id.item_id,
        geometry=geom,
        bbox=bounds,
        datetime=dt,
        properties={'aster:processing_number': scene_id.processing_number})

    # Common metadata
    item.common_metadata.providers = [ASTER_PROVIDER]
    if additional_providers is not None:
        item.common_metadata.providers.extend(additional_providers)
    item.common_metadata.created = str_to_datetime(
        tags.pop('PRODUCTIONDATETIME'))
    item.common_metadata.platform = tags.pop('PLATFORMSHORTNAME')
    item.common_metadata.instruments = [tags.pop('INSTRUMENTSHORTNAME')]

    # eo
    item.ext.enable('eo')
    # eo:cloud_cover is expressed as a 0-100 percentage
    item.ext.eo.cloud_cover = int(tags.pop('SCENECLOUDCOVERAGE'))

    # view
    item.ext.enable('view')
    # Don't pop these tags, so all three raw values stay in properties.
    item.ext.view.off_nadir = abs(float(tags['POINTINGANGLE.1']))
    sun_azimuth, sun_elevation = [
        float(x) for x in tags['SOLARDIRECTION'].split(', ')
    ]
    item.ext.view.sun_azimuth = sun_azimuth
    # A negative sun elevation breaks validation, so leave it out in that case.
    # See https://github.com/radiantearth/stac-spec/issues/853
    if sun_elevation >= 0.0:
        item.ext.view.sun_elevation = sun_elevation

    # proj
    item.ext.enable('projection')
    item.ext.projection.epsg = epsg_from_aster_utm_zone_number(
        int(tags.pop('UTMZONENUMBER')))

    # Add all remaining tags as item properties under the 'aster:' prefix.
    for k, v in tags.items():
        item.properties['aster:{}'.format(k)] = v

    hdf_href = make_absolute_href(hdf_path)

    asset = pystac.Asset(href=hdf_href,
                         media_type=pystac.MediaType.HDF,
                         roles=['data'],
                         title="ASTER L1T 003 HDF-EOS")

    item.ext.eo.set_bands(ASTER_BANDS, asset)

    item.add_asset(HDF_ASSET_KEY, asset)

    return item
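Hypothetical usage, assuming a local granule whose file name parses as an AsterSceneId (the path below is invented):

item = create_item('/data/AST_L1T_00305032000040446_20150409135350_78838.hdf')
print(item.id, item.bbox)
item.validate()  # fetches the STAC JSON schemas, so it needs network access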
Example No. 56
def create_shapefile_from_csv(geometry_type="Polygon"):

    if geometry_type in ("Polygon", "Point", "Centroid"):
        csv_fn = "/home/james/geocrud/adrc/candidate_locations.csv"
        classifications_fn = "/home/james/Desktop/classifications_by_id.csv"
        img_fn = "/home/james/serviceDelivery/ADRC/NLS_samples/82877397.tif"
        shp_fn = "/home/james/Desktop/candidate_locations_" + geometry_type.lower() + ".shp"

        ds = rasterio.open(img_fn)
        ds_affine = ds.affine

        schema_properties = OrderedDict(
            [
                ("id", "int"),
                ("x", "int"),
                ("y", "int"),
                ("w", "int"),
                ("h", "int")
            ])

        my_schema = {
            "geometry": geometry_type,
            "properties": schema_properties
        }

        if geometry_type == "Centroid":
            my_schema["geometry"] = "Point"

        my_driver = "ESRI Shapefile"
        my_crs = from_epsg(27700)

        with open(csv_fn, "r") as inpf:
            with fiona.open(shp_fn, "w", driver=my_driver, crs=my_crs, schema=my_schema) as outpf:
                my_reader = csv.reader(inpf)
                next(my_reader)  # skip the header row
                for r in my_reader:
                    fid = int(r[0])
                    x = int(r[1])
                    y = int(r[2])
                    w = int(r[3])
                    h = int(r[4])

                    # convert pixel coordinates to map coordinates
                    ul_coord = ds_affine * (x, y)
                    ll_coord = ds_affine * (x, y + h)
                    ur_coord = ds_affine * (x + w, y)

                    if geometry_type == "Point":
                        feature_geom = Point(ur_coord)

                    if geometry_type == "Polygon":
                        # ur_coord = ds_affine * ((x+w), (y-h))
                        feature_geom = box(ll_coord[0], ll_coord[1],
                                           ur_coord[0], ur_coord[1])

                    if geometry_type == "Centroid":
                        ur_coord = ds_affine * (x + w, y - h)
                        bbox = box(ll_coord[0], ll_coord[1],
                                   ur_coord[0], ur_coord[1])
                        feature_geom = bbox.centroid

                    outpf.write({
                        "geometry": mapping(feature_geom),
                        "properties": {
                            "id": fid,
                            "x": x,
                            "y": y,
                            "w": w,
                            "h": h
                        }
                    })
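The ds_affine * (col, row) products above are ordinary affine transforms mapping pixel indices to map coordinates. A standalone sketch with the affine package (the transform values are invented):

from affine import Affine

# 2 m pixels, origin at (350000, 5700000), north-up raster (negative row step)
transform = Affine(2.0, 0.0, 350000.0,
                   0.0, -2.0, 5700000.0)

x, y = transform * (10, 20)   # pixel column 10, row 20
print(x, y)                   # 350020.0 5699960.0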
Example No. 57
def contour(container,
            imtype,
            component,
            intervals=None,
            filter_size=DEFAULT_FILTER_SIZE):
    """
    Generate contours of a specific IMT and return as a Shapely
    MultiLineString object.

    Args:
        container (ShakeMapOutputContainer): ShakeMapOutputContainer
            with ShakeMap output data.
        imtype (str): String containing the name of an Intensity
            Measure Type found in container.
        component (str): Intensity Measure component found in container.
        intervals (np.ndarray or None): Array of intervals for IMT, or None.
        filter_size (int): Integer filter (see
            https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.ndimage.filters.median_filter.html)
    Returns:
        list: List of dictionaries containing two fields

                - geometry: GeoJSON-like representation of one of the objects
                  in https://toblerity.org/fiona/manual.html#geometry-types
                - properties: Dictionary of properties describing that
                  feature.

    Raises:
        NotImplementedError -- if the user attempts to contour a data file
            with sets of points rather than grids.
    """
    if container.getDataType() != 'grid':
        raise NotImplementedError('contour module can only contour '
                                  'gridded data, not sets of points')
    imtdict = container.getIMTGrids(imtype, component)
    gridobj = imtdict['mean']
    grid = gridobj.getData()
    metadata = gridobj.getGeoDict().asDict()
    if imtype == 'MMI':
        sgrid = grid
        fgrid = median_filter(sgrid, size=filter_size)
        units = 'mmi'
    elif imtype == 'PGV':
        sgrid = np.exp(grid)
        fgrid = median_filter(sgrid, size=filter_size)
        units = 'cms'
    else:
        sgrid = np.exp(grid) * 100.0
        fgrid = median_filter(sgrid, size=filter_size)
        units = 'pctg'

    if intervals is None:
        interval_type = 'log'
        if imtype == 'MMI':
            interval_type = 'linear'
        intervals = _get_default_intervals(fgrid, interval_type=interval_type)

    lonstart = metadata['xmin']
    latstart = metadata['ymin']
    lonspan = np.abs(metadata['xmax'] - lonstart)
    latspan = np.abs(metadata['ymax'] - latstart)
    nlon = metadata['nx']
    nlat = metadata['ny']

    line_strings = []  # list of dicts: MultiLineString geometry and properties

    for cval in intervals:
        contours = measure.find_contours(fgrid, cval)
        #
        # Convert coords to geographic coordinates; the coordinates
        # are returned in row, column order (i.e., (y, x))
        #
        new_contours = []
        for ic, coords in enumerate(contours):  # coords is a line segment
            if len(coords) <= 20:  # skip very short segments (contour "islands")
                continue

            contours[ic][:, 0] = coords[:, 1] * lonspan / nlon + lonstart
            contours[ic][:, 1] = (nlat - coords[:, 0]) * \
                latspan / nlat + latstart
            new_contours.append(contours[ic].tolist())

        if new_contours:
            mls = MultiLineString(new_contours)
            props = {'value': cval, 'units': units}
            line_strings.append({
                'geometry': mapping(mls),
                'properties': props
            })
    return line_strings
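Hypothetical usage against an existing ShakeMap result file; the import path follows the shakelib 4.x layout and may differ between versions:

from shakelib.utils.containers import ShakeMapOutputContainer

container = ShakeMapOutputContainer.load('shake_result.hdf')
features = contour(container, 'MMI', 'GREATER_OF_TWO_HORIZONTAL')
for f in features[:3]:
    print(f['properties'], f['geometry']['type'])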
Example No. 58
#!/usr/bin/env python
# coding: utf-8

from shapely.geometry import mapping, shape
from fiona import collection

with collection('sample.shp', 'r') as source:
    # schema = source.schema.copy()
    schema = {'geometry': 'Polygon', 'properties': {'name': 'str'}}
    with collection('some_buffer.shp', 'w', 'ESRI Shapefile',
                    schema) as output:
        for point in source:
            output.write({
                'properties': {
                    'name': point['properties']['name']
                },
                'geometry': mapping(shape(point['geometry']).buffer(5.0))
            })
Example No. 59
def PolygoniseRaster(DataDirectory, RasterFile, OutputShapefile='polygons'):
    """
    This function takes in a raster and converts to a polygon shapefile using rasterio
    from https://gis.stackexchange.com/questions/187877/how-to-polygonize-raster-to-shapely-polygons/187883#187883?newreg=8b1f507529724a8488ce4789ba787363

    Args:
        DataDirectory (str): the data directory with the basin raster
        RasterFile (str): the name of the raster
        OutputShapefile (str): the name of the output shapefile WITHOUT EXTENSION. Default = 'polygons'

    Returns:
        Dictionary where key is the raster value and the value is a shapely polygon

    Author: FJC
    """
    # import modules
    import rasterio
    from rasterio.features import shapes
    from shapely.geometry import shape, Polygon, mapping
    import fiona

    # define the mask
    #mask = None
    raster_band = 1

    # get raster no data value
    NDV = getNoDataValue(DataDirectory + RasterFile)

    # load in the raster using rasterio
    with rasterio.open(DataDirectory + RasterFile) as src:
        image = src.read(raster_band, masked=False)

        msk = src.read_masks(1)

        results = ({
            'properties': {
                'raster_val': v
            },
            'geometry': s
        } for i, (
            s,
            v) in enumerate(shapes(image, mask=msk, transform=src.transform)))

    # define shapefile attributes
    # crs = src.crs.wkt
    # print (crs)
    crs = GetUTMEPSG(DataDirectory + RasterFile)
    schema = {'geometry': 'Polygon', 'properties': {'ID': 'float'}}

    # Filter the basin results: keep only the largest polygon for each ID.
    geoms = list(results)

    filtered_geoms = {}
    area_dict = {}
    for f in geoms:
        this_shape = Polygon(shape(f['geometry']))
        this_val = float(f['properties']['raster_val'])
        this_area = this_shape.area
        if this_val in filtered_geoms:
            # Repeated ID: keep the polygon with the larger area.
            if area_dict[this_val] < this_area:
                filtered_geoms[this_val] = f
                area_dict[this_val] = this_area
                print("Found a repeated ID. Keeping the one with area of " +
                      str(this_area))
            else:
                print("Found a repeated ID. Keeping the initial one.")
        else:
            filtered_geoms[this_val] = f
            area_dict[this_val] = this_area

    # keep the filtered geometries in insertion order
    new_geoms = list(filtered_geoms.values())

    # transform results into shapely geometries and write to shapefile using fiona
    PolygonDict = {}
    with fiona.open(DataDirectory + OutputShapefile,
                    'w',
                    crs=crs,
                    driver='ESRI Shapefile',
                    schema=schema) as output:
        for f in new_geoms:
            this_shape = Polygon(shape(f['geometry']))
            this_val = float(f['properties']['raster_val'])
            print("ID is: " + str(this_val))
            if this_val != NDV:  # remove no data values
                output.write({
                    'geometry': mapping(this_shape),
                    'properties': {
                        'ID': this_val
                    }
                })
            PolygonDict[this_val] = this_shape

    return PolygonDict
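Hypothetical usage (the paths are illustrative, and getNoDataValue and GetUTMEPSG must be available from the same module):

basin_polys = PolygoniseRaster('/data/basins/', 'basins.bil',
                               OutputShapefile='basin_polygons')
for basin_id, poly in basin_polys.items():
    print(basin_id, poly.area)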
Example No. 60
def shape_to_feature(g, props=None):
    # avoid a mutable default argument
    if props is None:
        props = {}
    # shapely's mapping() returns tuples; GeoJSON wants lists
    g = mapping(g)
    g['coordinates'] = convert_coords_to_lists(g['coordinates'])
    return {'type': 'Feature', 'properties': props, 'geometry': g}