Example #1
    def test_json_overwrite_invalid(self):
        """Overwrite an existing file that isn't a valid GeoJSON"""

        # write some invalid data to a file
        path = os.path.join(self.tempdir, "foo.json")
        with open(path, "w") as f:
            f.write("This isn't a valid GeoJSON file!!!")

        schema1 = {"geometry": "Unknown", "properties": [("title", "str")]}
        features1 = [
            {
                "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
                "properties": {"title": "One"},
            },
            {
                "geometry": {"type": "MultiPoint", "coordinates": [[0.0, 0.0]]},
                "properties": {"title": "Two"},
            }
        ]

        # attempt to overwrite it with a valid file
        with fiona.open(path, "w", driver="GeoJSON", schema=schema1) as dst:
            dst.writerecords(features1)

        # test the data was written correctly
        with fiona.open(path, "r") as src:
            self.assertEqual(len(src), 2)
Example #2
def write_data_to_regions(src_path, dst_path, key_property, data):
    with fiona.open(src_path) as src:

        # Copy the source schema and add the new properties.
        sink_schema = src.schema.copy()
        for col in data.columns:
            data_dtype = str(data[col].dtype)
            if data_dtype.startswith('float'):
                schema_dtype = 'float'
            elif data_dtype.startswith('int'):
                schema_dtype = 'int'
            else:
                raise NotImplementedError('unsupported dtype')

            sink_schema['properties'][col] = schema_dtype

        settings = dict(crs=src.crs, driver=src.driver, schema=sink_schema)

        if os.path.exists(dst_path):
            os.remove(dst_path)
        with fiona.open(dst_path, 'w', **settings) as dst:
            for feature in src:
                key = feature['properties'][key_property]
                for col in data:
                    feature['properties'][col] = data.loc[key, col]
                dst.write(feature)
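A minimal usage sketch (the file names, key field, and figures below are hypothetical; it assumes a pandas DataFrame whose index holds the values of the key property):

import pandas as pd

# One row per region, indexed by the values of the key property.
stats = pd.DataFrame(
    {'pop2020': [1200, 3400], 'area_km2': [10.5, 42.0]},
    index=['R001', 'R002'],
)
write_data_to_regions('regions.shp', 'regions_stats.shp', 'REGION_ID', stats)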
Example #3
def test_with_line(tmpdir):

    p = tmpdir.mkdir('testfiles')
    testfile = str(p.join('out.geojson'))
    linefile = testfile.replace('out', 'line')

    result = CliRunner().invoke(gpsdio.cli.main.main_group, [
        '--o-drv', 'Vector',
        '--o-drv-opt', 'driver=GeoJSON',
        '--o-drv-opt', 'line={}'.format(linefile),
        'etl',
        'tests/data/points.json',
        testfile
    ])
    assert result.exit_code == 0

    with fio.open(linefile) as src:
        assert src.driver == 'GeoJSON'
        assert len(src) == 1
        assert src.schema['geometry'] == 'LineString'
        assert src.schema['properties'] == {}
        line_coordinates = src[0]['geometry']['coordinates']

    with fio.open(testfile) as vector, gpsdio.open('tests/data/points.json') as messages:
        for feature, msg, l_coord in zip(vector, messages, line_coordinates):
            m_x = msg['lon']
            m_y = msg['lat']
            f_x, f_y = feature['geometry']['coordinates'][:2]
            l_x, l_y = l_coord

            assert round(m_x, 7) == round(f_x, 7) == round(l_x, 7)
            assert round(m_y, 7) == round(f_y, 7) == round(l_y, 7)
Example #4
def run(in_flname, out_flname, dist, fld, fltr):
    try:
        print("Starting process...")

        createOutFolder(out_flname)

        with fiona.open(in_flname, 'r') as in_ds:
            in_drv = in_ds.driver
            in_crs = in_ds.crs
            in_sch = in_ds.schema

            out_schema = {'geometry': 'Polygon',
                          'properties': in_sch.get('properties')}

            with fiona.open(out_flname, 'w',
                        driver=in_drv,
                        crs=in_crs,
                        schema=out_schema) as out_ds:
                for ft in in_ds:
                    if fltr in ft['properties'].get(fld):
                        geom_shl = shape(ft.get('geometry')).buffer(dist)
                        out_ds.write({'geometry': mapping(geom_shl),
                                      'properties': ft.get('properties'),
                                      'type': ft.get('type'),
                                      'id': ft.get('id')})

        print("Buffer successfully created!")

    except Exception as error:
        print("Error computing Buffer: {}".format(error))
Example #5
  def buffer(self, toBuffer, outFile, distance, dissolve):

    with fiona.open(toBuffer, 'r') as input:
      schema = input.schema
      crs = input.crs
      schema['geometry'] = 'Polygon'
      
      buf_features = []
      for f in input:   
        buf_features.append(( shape(f['geometry']).buffer(distance), f['properties'] ))
      
      if dissolve:
        buf_features = cascaded_union([geom for geom, prop in buf_features])
        schema = {'geometry':buf_features.geom_type, 'properties':{'fid':'int'}}
        buf_features = [(buf_features, {'fid': 1})]
   

    # With the Windows-compiled Shapely library, Python crashes if a str field is 255 characters wide;
    # this block is not needed with source-compiled versions
    #--------------------------------------------------
    for k, v in schema['properties'].items():
      if v[0:3] == 'str' and v[-3:] == '255':
        schema['properties'][k] = 'str:254'
    #--------------------------------------------------
   
    with fiona.open(outFile, 'w', 'ESRI Shapefile', crs=crs, schema=schema) as output:
      for geom, prop in buf_features: 
        output.write({'geometry': mapping(geom), 'properties':prop})
Example #6
def get_extents_from_huc(huc_data_shp=None,extents_output_shp=None,extents_huc_list=None):
    '''Extracts a user-specified HUC or list of HUCs from the national dataset and writes it
    to a shapefile. 'huc_data_shp'=shapefile that includes the huc polygons
    that will be extracted.'''
       
    extents_huc_scale = len(extents_huc_list[0])
    huc_field = 'HUC' + str(extents_huc_scale)
    
    with fiona.open(huc_data_shp) as vin:
        schema = vin.schema
        crs = vin.crs
        driver = vin.driver        
    
    # Reduce the extract schema to only the huc id field    
    schema['properties'] = {huc_field:'str'}
    
    # Now write the model domain shapefile     
    with fiona.open(huc_data_shp) as vect_in:
        polygon_list = []        
        for feature in vect_in:
            if (feature['properties'][huc_field] in extents_huc_list): 
                polygon_list.append(shape(feature['geometry']))
        merged = unary_union(polygon_list)

    with fiona.open(extents_output_shp,'w',driver=driver,crs=crs,schema=schema) as extract_out:
        extract_out.write({'geometry': mapping(merged),'properties':{huc_field:'Merged'}})
    
    return
Example #7
def get_ibound_from_huc(huc_data_shp=None,ibound_output_shp=None,ibound_huc_scale=None,extents_huc_list=None):
    '''Reduces the shapefile used for the ibound zonation scheme to only those
    polygons (i.e., HUCs) that are in the active model area.  For higher resolution
    zonation (i.e., using smaller HUCs for the IBOUND zonation scheme) this is much
    faster than clipping the zonation shapefile before rasterizing.'''

    print('\nReducing the IBOUND zonation shapefile to the active model area.\n')
    
    zone_id_field = 'HUC' + str(ibound_huc_scale)
    
    with fiona.open(huc_data_shp,'r') as vin:
        
        driver,crs,schema = vin.driver,vin.crs,vin.schema
        schema['properties'] = {zone_id_field:'str'}
        
        with fiona.open(ibound_output_shp,'w',driver=driver,crs=crs,schema=schema) as vout:
        
            for feature in vin:
                izone = int(feature['properties']['HUC' + str(ibound_huc_scale)])
                check_izone = str(izone).zfill(int(ibound_huc_scale))
                
                for ihuc in extents_huc_list:
                    if (check_izone.startswith(ihuc)):
                        
                        igeometry = shape(feature['geometry'])
                        vout.write({'geometry': mapping(igeometry),'properties':{zone_id_field:izone}})

    return
Example #8
    def test_date(self):
        self.sink = fiona.open(
            os.path.join(self.tempdir, "date_test.shp"),
            "w",
            driver="ESRI Shapefile",
            schema={
                'geometry': 'Point',
                'properties': [('id', 'int'), ('date', 'date')]},
            crs={'init': "epsg:4326", 'no_defs': True})

        recs = [{
            'geometry': {'type': 'Point',
                         'coordinates': (7.0, 50.0)},
            'properties': {'id': 1, 'date': '2013-02-25'}
        }, {
            'geometry': {'type': 'Point',
                         'coordinates': (7.0, 50.2)},
            'properties': {'id': 1, 'date': datetime.date(2014, 2, 3)}
        }]
        self.sink.writerecords(recs)
        self.sink.close()
        self.assertEqual(len(self.sink), 2)

        c = fiona.open(os.path.join(self.tempdir, "date_test.shp"), "r")
        self.assertEqual(len(c), 2)

        rf1, rf2 = list(c)
        self.assertEqual(rf1['properties']['date'], '2013-02-25')
        self.assertEqual(rf2['properties']['date'], '2014-02-03')
Example #9
def split_multi_to_single_poly(input_dir, output_dir):
    # get list of countries in input dir
    countries = get_countries(input_dir)

    # make dir to hold unioned and dissolved shapefiles
    rm_and_mkdir(output_dir)

    for country in countries:
        # specify io directories and filenames
        input_filename = country + '.shp'
        input_path = os.path.join(input_dir, input_filename)
        output_path = os.path.join(output_dir, input_filename)
        # write the split geometries (polys instead of multi-polys) to the target dir
        try:
            print(country)
            with fiona.open(input_path) as input:
                # create the new file: the driver, crs and schema are the same
                with fiona.open(output_path, 'w', driver=input.driver, crs=input.crs, schema=input.schema) as output:
                    # read the input file
                    for multi in input:
                        # extract each Polygon feature
                        for poly in shape(multi['geometry']):
                            # write the Polygon feature
                            output.write({'properties': multi['properties'], 'geometry': mapping(poly)})
        except Exception:
            print('error with %s' % country)
Example #10
    def test_date(self):
        self.sink = fiona.open(
            os.path.join(self.tempdir, "date_test.shp"),
            "w",
            driver="ESRI Shapefile",
            schema={"geometry": "Point", "properties": [("id", "int"), ("date", "date")]},
            crs={"init": "epsg:4326", "no_defs": True},
        )

        recs = [
            {"geometry": {"type": "Point", "coordinates": (7.0, 50.0)}, "properties": {"id": 1, "date": "2013-02-25"}},
            {
                "geometry": {"type": "Point", "coordinates": (7.0, 50.2)},
                "properties": {"id": 1, "date": datetime.date(2014, 2, 3)},
            },
        ]
        self.sink.writerecords(recs)
        self.sink.close()
        self.assertEqual(len(self.sink), 2)

        with fiona.open(os.path.join(self.tempdir, "date_test.shp"), "r") as c:
            self.assertEqual(len(c), 2)

            rf1, rf2 = list(c)
            self.assertEqual(rf1["properties"]["date"], "2013-02-25")
            self.assertEqual(rf2["properties"]["date"], "2014-02-03")
Example #11
def included_NUTS(regions, output, cover_files):

    covers = []
    for path in cover_files:
        with open(path) as f:
            covers.append(json.load(f))

    def sufficient_cover(code):
        return all(
            code in cover and
            cover[code] is not None and
            abs(cover[code]-1) <= 0.05
            for cover in covers)

    # First list of candidates
    candidates = nuts_partition()

    # This is a lousy hack: Requires that substrate amounts are computable,
    # so manure_mgmt.pkl and animal_pop.pkl must exist (see Makefile)
    import biogasrm.results as results
    import biogasrm.parameters as parameters
    params = parameters.defaults()
    substrates_known = set(results.get_substrates(params).index)

    # Exclude candidates which are not sufficiently covered
    included = set(
        [c for c in candidates
        if sufficient_cover(c) and c in substrates_known])

    with fiona.open(regions, 'r') as src:
        settings = dict(driver=src.driver, crs=src.crs, schema=src.schema.copy())
        with fiona.open(output, 'w', **settings) as dst:
            for f in src:
                if f['properties']['NUTS_ID'] in included:
                    dst.write(f)
Example #12
    def test_transaction(self, tmpdir):
        """
        Test that transaction start/commit is called the expected number of
        times and that the default transaction size can be overridden. The
        test uses a custom logging handler to listen for the debug messages
        produced when a transaction is started/committed.
        """
        num_records = 250
        transaction_size = 100

        assert fiona.ogrext.DEFAULT_TRANSACTION_SIZE == 20000
        fiona.ogrext.DEFAULT_TRANSACTION_SIZE = transaction_size
        assert fiona.ogrext.DEFAULT_TRANSACTION_SIZE == transaction_size

        path = str(tmpdir.join("output.gpkg"))

        feature = next(create_records(1))

        schema = {
            "geometry": "Point",
            "properties": {"value": "int"}
        }

        with fiona.open(path, "w", driver="GPKG", schema=schema) as dst:
            dst.writerecords(create_records(num_records))

        assert self.handler.history["Starting transaction (initial)"] == 1
        assert self.handler.history["Starting transaction (intermediate)"] == num_records // transaction_size
        assert self.handler.history["Comitting transaction (intermediate)"] == num_records // transaction_size
        assert self.handler.history["Comitting transaction (final)"] == 1

        with fiona.open(path, "r") as src:
            assert len(src) == num_records
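The test refers to self.handler, which is not shown in this snippet; a minimal sketch of such a counting handler (the class name and logger wiring are assumptions) might look like:

import logging
from collections import defaultdict

class CountingHandler(logging.Handler):
    """Count how many times each distinct log message is emitted."""

    def __init__(self):
        super().__init__()
        self.history = defaultdict(int)

    def emit(self, record):
        self.history[record.getMessage()] += 1

# Hypothetical wiring before the test runs:
# handler = CountingHandler()
# logging.getLogger('fiona.ogrext').setLevel(logging.DEBUG)
# logging.getLogger('fiona.ogrext').addHandler(handler)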
Example #13
    def test_feat(self, path_coutwildrnp_shp):
        with fiona.open(path_coutwildrnp_shp, "r", layer="coutwildrnp") as c:
            f1 = next(iter(c))
            with fiona.open(path_coutwildrnp_shp, "r",
                            layer="coutwildrnp") as c2:
                f2 = next(iter(c2))
                assert f1 == f2
Example #14
def create_output_shp(source_shp, source_data, dest_dir):
    dest_shp = pth.join(dest_dir, normalize(source_shp).split("_")[0] +
                        "_" + normalize(source_data) + ".shp")
    if pth.exists(dest_shp):
        return
    with fiona.open(source_shp) as shape:
        new_schema = dict()
        new_schema['geometry'] = shape.schema['geometry']
        new_schema['properties'] = OrderedDict([(k, v) for k, v in shape.schema['properties'].items()
                                                if k != "SUP_M2" and k != "SUP_Ha"])
        log.info("Loading data from %s" % source_data)
        data = pd.read_csv(source_data, index_col=0, header=0)

        for column in data.index:
            new_schema['properties'][str(column)] = "float:7.4"
        log.info("Writing shapefile %s" % dest_shp)

        with fiona.open(dest_shp, "w",
                        driver=shape.driver,
                        crs=shape.crs,
                        schema=new_schema) as dest:
            for rec in shape:
                del rec['properties']["SUP_M2"]
                del rec['properties']["SUP_Ha"]
                for column in data.index:
                    rec['properties'][str(column)] = data.at[column, rec['properties']['PART']]
                log.info("Writing record %s" % rec['id'])
                dest.write(rec)
Example #15
def split_shape_by_attribute(infile, field, dest):
    """Split a shapefile by attribute.

    Works for both unique and non-unique attribute values: records with the
    same attribute value are grouped into separate, new shapefiles.

    Parameters
    ----------
    infile : str
        filename of the input shapefile
    field : str
        name of the attribute table field
    dest : str
        destination folder where the output shapefiles
        will be written
    """
    with fiona.open(infile) as source:
        meta = source.meta
        for f in source:
            outfile = os.path.join(dest, "%s.shp" % f['properties'][field])
            mode = 'a' if os.path.exists(outfile) else 'w'
            with fiona.open(outfile, mode, **meta) as sink:
                sink.write(f)
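A usage sketch (the file and field names are hypothetical); this would produce one shapefile per distinct LANDUSE value, e.g. by_landuse/residential.shp:

split_shape_by_attribute('parcels.shp', 'LANDUSE', 'by_landuse')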
Example #16
def shp_writer(model_shp, geoDF, output_shp):
    with fiona.open(model_shp) as source:
        source_driver = source.driver
        source_crs = source.crs
        source_schema = source.schema
        # replace the previous fields with a single new field and its respective data type
        source_schema['properties'] = {'mxint15min': 'float'}
        #writing a new file    
        with fiona.open(output_shp,
                        'w',
                        driver=source_driver,
                        crs=source_crs,
                        schema=source_schema) as collection:
            #rec = {'geometry': mapping(geoDF.loc[0].polygon),'properties':{'mxrai15min': 0.5}}
            #collection.write(rec)
            for i in geoDF.index:
                #create a record
                rec = {}
                #fill geometry
                rec['geometry'] = mapping(geoDF.loc[i].polygon)
                #fill attribute values
                intensity = float(geoDF.loc[i].maxrain_15min)
                rec['properties'] = {'mxint15min': intensity}
                collection.write(rec)
Example #17
    def test_shapefile_generation(self):
        # Process config file
        test_artifact_path = os.path.join(self.main_artifact_path, 'shapefiles')
        config = self.parse_vector_config(self.shapefile_test_config, test_artifact_path)

        # Open input shapefile and get stats
        try:
            with fiona.open(config['input_files'][0]) as geojson:
                origin_num_features = len(list(geojson))
        except fiona.errors.FionaValueError:
            self.fail("Can't open input geojson {0}. Make sure it's valid.".format(config['input_files'][0]))
        
        # Run vectorgen
        os.chdir(test_artifact_path)
        cmd = 'oe_vectorgen -c ' + self.shapefile_test_config
        run_command(cmd, ignore_warnings=True)

        # Check the output
        output_file = os.path.join(config['output_dir'], config['prefix'] + '.shp')
        try:
            with fiona.open(output_file) as shapefile:
                self.assertEqual(origin_num_features, len(list(shapefile)),
                                 "Feature count between input GeoJSON {0} and output shapefile {1} differs. There is a problem with the conversion process."
                                 .format(config['input_files'][0], output_file))
        except IOError:
            self.fail("Expected output shapefile {0} doesn't appear to have been created.".format(output_file))
        except fiona.errors.FionaValueError:
            self.fail("Bad output shapefile {0}.".format(output_file))
Example #18
def test_dict_subclass(tmpdir):
    """Rasterio now has a `CRS()` class that subclasses
    `collections.UserDict()`.  Make sure we can receive it.

    `UserDict()` is a good class to test against because in Python 2 it is
    not a subclass of `collections.Mapping()`, so it provides an edge case.
    """

    class CRS(six.moves.UserDict):
        pass

    outfile = str(tmpdir.join('test_UserDict.geojson'))

    profile = {
        'crs': CRS(init='EPSG:4326'),
        'driver': 'GeoJSON',
        'schema': {
            'geometry': 'Point',
            'properties': {}
        }
    }

    with fiona.open(outfile, 'w', **profile) as dst:
        dst.write({
            'type': 'Feature',
            'properties': {},
            'geometry': {
                'type': 'Point',
                'coordinates': (10, -10)
            }
        })

    with fiona.open(outfile) as src:
        assert len(src) == 1
        assert src.crs == {'init': 'epsg:4326'}
Example #19
    def test_write_mismatch(self):
        """TOFIX: OGR silently fails to convert strings"""
        # Details:
        #
        # If we tell OGR that we want a latin-1 encoded output file and
        # give it a feature with a unicode property that can't be converted
        # to latin-1, no error is raised and OGR just writes the utf-8
        # encoded bytes to the output file.
        #
        # This might be shapefile specific.
        #
        # Consequences: no error on write, but there will be an error
        # on reading the data and expecting latin-1.
        schema = {
            'geometry': 'Point',
            'properties': {'label': 'str', 'num': 'int'}}

        with fiona.open(os.path.join(self.tempdir, "test-write-fail.shp"),
                        'w', driver="ESRI Shapefile", schema=schema,
                        encoding='latin1') as c:
            c.writerecords([{
                'type': 'Feature',
                'geometry': {'type': 'Point', 'coordinates': [0, 0]},
                'properties': {
                    'label': u'徐汇区',
                    'num': 0}}])

        with fiona.open(os.path.join(self.tempdir), encoding='latin1') as c:
            f = next(iter(c))
            # Next assert fails.
            self.assertEqual(f['properties']['label'], u'徐汇区')
Example #20
    def process_file(self, inFile, outFile, region):
        # process file is the final processing step which writes to the user-defined outputFile
        with fiona.open(inFile, 'r', encoding='utf-8') as input:
            input_driver = input.driver
            input_crs = input.crs
            input_schema = input.schema.copy()
            input_schema['properties']['shield_typ'] = 'str:254'
            input_schema['properties']['label'] = 'str:254'
            input_schema['properties']['seg_len'] = 'int:10'
            input_schema['properties']['label_len'] = 'int:10'
            with fiona.open(outFile, 'w', driver=input_driver, crs=input_crs, schema=input_schema, encoding='utf-8') as output:
                for item in input:
                    shield_val = self.create_shield_type(item,region)
                    item['properties']['shield_typ'] = shield_val
                    label_val = self.create_label(item,region)
                    item['properties']['label'] = label_val
                    segment_length_val = shape(item['geometry']).length
                    item['properties']['seg_len'] = segment_length_val
                    # remove items that have no value in the label field
                    if label_val is None:
                        continue
                    # measure the length of characters in the label field
                    label_length_val = len(label_val)
                    # for USA region only, remove items that have a label length >5 or = 0
                    if region == "USA" and (label_length_val > 5 or label_length_val == 0):
                        continue
                    item['properties']['label_len'] = label_length_val

                    output.write({'properties': item['properties'],'geometry': mapping(shape(item['geometry']))})
Example #21
    def dissolve(self, inFile, outFile):
        # create dictionary for storing the uniqueRefs
        uniqueRefs = {}
        with fiona.open(inFile, 'r', encoding='utf-8') as input:
            input_driver = input.driver
            input_crs = input.crs
            input_schema = {'geometry': 'MultiLineString', 'properties': {'ref': 'str:254'}}
            with fiona.open(outFile, 'w', driver=input_driver, crs=input_crs, schema=input_schema, encoding='utf-8') as output:
                for item in input:
                    # extract the key; if the 'ref' attribute is NOT called 'ref',
                    # you can insert the different attribute name HERE (and only HERE).
                    key = item['properties']['ids_and_re']
                    geom = shape(item['geometry'])
                    # find all motorways within the New Zealand mainland
                    # and strip all non-digit characters from their refs
                    newZeaBox = [(17920614.01, -4033681.682), (20362002, -4054837.565), (20357771.35, -6073108.484), (17683668.157, -6068877.308)]
                    newZeaPoly = Polygon(newZeaBox)
                    if geom.within(newZeaPoly):
                        key = re.sub(r'\D', "", key)
                    if not geom.type.startswith('Multi'):
                        geom = [geom]
                    for g in geom:
                        if key in uniqueRefs:
                            uniqueRefs[key].append(g)
                        else:
                            uniqueRefs[key] = [g]
                for key in uniqueRefs:
                    # omit lines that have blank 'ref' tags
                    if key is not None and key != 'None':
                        dissolve_feat = cascaded_union(uniqueRefs[key])
                        output.write({'geometry': mapping(dissolve_feat), 'properties': {'ref': key}})
Example #22
def getNoDataGrid(predictors,xmin,xmax,ymin,ymax):
    txmin = xmin
    txmax = xmax
    tymin = ymin
    tymax = ymax
    mindx = 9999999999
    mindy = 9999999999
    #figure out bounds enclosing all files
    for predname,predfile in predictors.items():
        if not os.path.isfile(predfile):
            continue
        ftype = getFileType(predfile)
        if ftype == 'shapefile':
            f = fiona.open(predfile,'r')
            bxmin,bymin,bxmax,bymax = f.bounds
            f.close()
            if bxmin < txmin:
                txmin = bxmin
            if bxmax > txmax:
                txmax = bxmax
            if bymin < tymin:
                tymin = bymin
            if bymax > tymax:
                tymax = bymax
        elif ftype == 'grid':
            gridtype = getGridType(predfile)
            if gridtype is None:
                raise Exception('File "%s" does not appear to be either a GMT grid or an ESRI grid.' % predfile)
            fdict = getFileGeoDict(predfile,gridtype)
            if fdict.dx < mindx:
                mindx = fdict.dx
            if fdict.dy < mindy:
                mindy = fdict.dy
            if fdict.xmin < txmin:
                txmin = fdict.xmin
            if fdict.xmax > txmax:
                txmax = fdict.xmax
            if fdict.ymin < tymin:
                tymin = fdict.ymin
            if fdict.ymax > tymax:
                tymax = fdict.ymax
    sdict = GeoDict.createDictFromBox(txmin,txmax,tymin,tymax,mindx,mindy)
    nanarray = np.zeros((sdict.ny,sdict.nx),dtype=np.int8)
    for predname,predfile in predictors.items():
        if not os.path.isfile(predfile):
            continue
        ftype = getFileType(predfile)
        if ftype == 'shapefile':
            shapes = list(fiona.open(predfile,'r'))
            grid = Grid2D.rasterizeFromGeometry(shapes,sdict)
        else:
            gridtype = getGridType(predfile)
            if gridtype == 'gmt':
                grid = GMTGrid.load(predfile,samplegeodict=sdict,resample=True,method='nearest',doPadding=True)
            else:
                grid = GDALGrid.load(predfile,samplegeodict=sdict,resample=True,method='nearest',doPadding=True)
        nangrid = np.isnan(grid.getData())
        nanarray = nanarray | nangrid
    nangrid = Grid2D(data=nanarray,geodict=sdict)
    return nangrid
Example #23
    def process_file(self, inFile, outFile, threshold):

        with fiona.open(inFile, 'r') as input:
            meta = input.meta
            # The outFile has the same crs, schema as inFile
            with fiona.open(outFile, 'w', **meta) as output:

                # Read shapely geometries from the file
                # and loop through all of them
                for myGeom in input:

                    myShape = shape(myGeom['geometry'])

                    if isinstance(myShape, Polygon):
                        myShape = self.simplify_polygon(myShape, threshold)
                    elif isinstance(myShape, MultiPolygon):
                        myShape = self.simplify_multipolygon(myShape, threshold)
                    elif isinstance(myShape, LineString):
                        myShape = self.simplify_line(myShape, threshold)
                    elif isinstance(myShape, MultiLineString):
                        myShape = self.simplify_multiline(myShape, threshold)
                    else:
                        raise ValueError('Unhandled geometry type: ' + repr(myShape.type))

                    # write to outfile
                    if myShape is not None:
                        output.write({'geometry':mapping(myShape), 'properties': myGeom['properties']})
Example #24
def make_shapefile(data, name):
    path = os.path.join('op_data',name + '.shp')
    crs = from_epsg(29902)
    if type(data) == dict:
        a_schema = {'geometry': 'Point',
                            'properties': {'name':'str', 'address':'str'}
                    }
        with fiona.open(path, "w",
                        driver= 'ESRI Shapefile',
                        crs= crs,
                        schema= a_schema) as output:
            for k, v in data.items():
                parts = k.split(',')
                name = parts[0]
                output.write({
                            'properties':{'name':name, 'address':k},
                              'geometry':geometry.mapping(v)})
    else:
        geom_type = data.geom_type

        a_schema = {'geometry': geom_type,
                            'properties': {'name':'str'}
                           }
        with fiona.open(path, "w",
                        driver= 'ESRI Shapefile',
                        crs= crs,
                        schema= a_schema) as output:
            output.write({
                        'properties':{'name':name},
                          'geometry':geometry.mapping(data)})
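A usage sketch (assuming shapely geometries; the names and Irish Grid coordinates are made up):

from shapely.geometry import Point

# dict input: one named, addressed point per record
make_shapefile({'Shop A, 1 Main St': Point(315904, 234671)}, 'shops')

# single-geometry input: written with just a name property
make_shapefile(Point(315904, 234671), 'depot')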
Example #25
def test_date(tmpdir):
    name = str(tmpdir.join("date_test.shp"))
    sink = fiona.open(
        name, "w",
        driver="ESRI Shapefile",
        schema={
            'geometry': 'Point',
            'properties': [('id', 'int'), ('date', 'date')]},
        crs={'init': "epsg:4326", 'no_defs': True})

    recs = [{
        'geometry': {'type': 'Point',
                     'coordinates': (7.0, 50.0)},
        'properties': {'id': 1, 'date': '2013-02-25'}
    }, {
        'geometry': {'type': 'Point',
                     'coordinates': (7.0, 50.2)},
        'properties': {'id': 1, 'date': datetime.date(2014, 2, 3)}
    }]
    sink.writerecords(recs)
    sink.close()
    assert len(sink) == 2

    with fiona.open(name, "r") as c:
        assert len(c) == 2

        rf1, rf2 = list(c)
        assert rf1['properties']['date'] == '2013-02-25'
        assert rf2['properties']['date'] == '2014-02-03'
Example #26
def to_crs(old, new, new_epsg=4326):
    """Convert old shapefile to new shapefile with new crs.

    """
    crs = from_epsg(new_epsg)

    with fiona.open(old, 'r') as source:
        sink_schema = source.schema.copy()
        p_in = Proj(source.crs)
        with fiona.open(
                new, 'w',
                crs=crs,
                driver=source.driver,
                schema=sink_schema,
                ) as sink:

            p_out = Proj(sink.crs)

            for f in source:

                try:
                    assert f['geometry']['type'] == "Polygon"
                    new_coords = []
                    for ring in f['geometry']['coordinates']:
                        x2, y2 = transform(p_in, p_out, *zip(*ring))
                        new_coords.append(list(zip(x2, y2)))
                    f['geometry']['coordinates'] = new_coords
                    sink.write(f)

                except Exception:
                    # In practice, this won't work for most shapes since they
                    # are frequently of type MultiPolygon.
                    print("Error transforming feature {}".format(f['id']))
                    raise
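As the comment notes, the ring-by-ring transform above only handles simple Polygon features. A more general sketch (assuming pyproj 2+ and Shapely; the function name is illustrative) reprojects any geometry type in one call:

import pyproj
from shapely.geometry import mapping, shape
from shapely.ops import transform

def reproject_geometry(geom, crs_in, crs_out):
    # Build a coordinate-transforming function and apply it to every
    # vertex of the geometry, whatever its type.
    project = pyproj.Transformer.from_crs(crs_in, crs_out, always_xy=True).transform
    return mapping(transform(project, shape(geom)))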
Example #27
def spatial_join(points_shp, poly_shp, outcsv):
    # shape() is from shapely.geometry; POLYIDFIELD/PTIDFIELD are the
    # attribute fields holding the polygon and point identifiers.
    with fiona.open(points_shp, 'r') as points, fiona.open(poly_shp, 'r') as polys:

        shp_polys = []
        poly_ids = []
        for p in polys:
            poly_ids.append(p['properties'][POLYIDFIELD])
            shp_polys.append(shape(p['geometry']))

        shp_points = []
        point_ids = []
        for p in points:
            point_ids.append(p['properties'][PTIDFIELD])
            shp_points.append(shape(p['geometry']))

    r = [["pt_ids", "poly_ids"]]
    for i, poly in enumerate(shp_polys):
        # collect the ids of all points that fall inside this polygon
        pts_inside = [point_ids[j] for j, pt in enumerate(shp_points) if poly.contains(pt)]
        r.append([pts_inside, poly_ids[i]])

    with open(outcsv, 'w') as f:
        c = csv.writer(f)
        c.writerows(r)
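The nested containment test above is quadratic in the number of features; for large inputs, a spatial index helps. A sketch assuming Shapely 2.x, where STRtree.query returns integer indices into the input list:

from shapely.strtree import STRtree

tree = STRtree(shp_points)
for i, poly in enumerate(shp_polys):
    candidates = tree.query(poly)  # bounding-box hits only
    pts_inside = [point_ids[j] for j in candidates if poly.contains(shp_points[j])]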
Example #28
def create_nx_graph(turbinesfn,tiffile,directory,layerDict):
    #tiffile and turbinesfn should have COMPLETE directory.
    with fiona.open(next(iter(layerDict.values()))['file']) as sample:
        crs = sample.crs
    siteGraph = nx.Graph()
    sitePos = {}
    i = 0
    with fiona.open(turbinesfn) as turbines:
        if turbines.crs != crs:
            proj1=pyproj.Proj(turbines.crs,preserve_units=True)
            proj2=pyproj.Proj(crs,preserve_units=True)
            def conversion(x,y):
                return pyproj.transform(proj1,proj2,x,y)
        else:
            def conversion(x,y):
                return (x,y)
        
        sitePos = {}
        for i,t in enumerate(turbines): 
            try:
                sitePos[t['id']]= conversion(*t['geometry']['coordinates'])
            except Exception:
                print('ERROR when reading Shapefile')
                continue
        siteGraph.add_nodes_from(sitePos.keys())
        for combo in itertools.combinations(siteGraph.nodes(),2):
            distance = simple_shortest_path(tiffile,sitePos[combo[0]],sitePos[combo[1]])
            siteGraph.add_edge(combo[0],combo[1],weight=distance)
    return siteGraph, sitePos
Example #29
def convert_multipart_to_singlepart(path_in, path_out, new_uid_name=UgridToolsConstants.LINK_ATTRIBUTE_NAME, start=0):
    """
    Convert a vector GIS file from multipart to singlepart geometries. The function copies all attributes and
    maintains the coordinate system.

    :param str path_in: Path to the input file containing multipart geometries.
    :param str path_out: Path to the output file.
    :param str new_uid_name: Use this name as the default for the new unique identifier.
    :param int start: Start value for the new unique identifier.
    """

    with fiona.open(path_in) as source:
        len_source = len(source)
        source.meta['schema']['properties'][new_uid_name] = 'int'
        with fiona.open(path_out, mode='w', **source.meta) as sink:
            for ctr, record in enumerate(source, start=1):
                geom = shape(record['geometry'])
                if isinstance(geom, BaseMultipartGeometry):
                    for element in geom:
                        record['properties'][new_uid_name] = start
                        record['geometry'] = mapping(element)
                        sink.write(record)
                        start += 1
                else:
                    record['properties'][new_uid_name] = start
                    sink.write(record)
                    start += 1
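A usage sketch (paths are hypothetical):

# Explode multipart rivers into single parts, numbering them from 1
# in a new 'LINK_ID'-style attribute.
convert_multipart_to_singlepart('rivers_multi.shp', 'rivers_single.shp', start=1)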
Example #30
    def testCreateBigIntSchema(self):
        name = os.path.join(self.tempdir, 'output1.shp')

        a_bigint = 10 ** 18 - 1
        fieldname = 'abigint'

        kwargs = {
            'driver': 'ESRI Shapefile',
            'crs': 'EPSG:4326',
            'schema': {
                'geometry': 'Point',
                'properties': [(fieldname, 'int:10')]}}
        if get_gdal_version_num() < calc_gdal_version_num(2, 0, 0):
            with self.assertRaises(OverflowError):
                with fiona.open(name, 'w', **kwargs) as dst:
                    rec = {}
                    rec['geometry'] = {'type': 'Point', 'coordinates': (0, 0)}
                    rec['properties'] = {fieldname: a_bigint}
                    dst.write(rec)
        else:

            with fiona.open(name, 'w', **kwargs) as dst:
                rec = {}
                rec['geometry'] = {'type': 'Point', 'coordinates': (0, 0)}
                rec['properties'] = {fieldname: a_bigint}
                dst.write(rec)

            with fiona.open(name) as src:
                if get_gdal_version_num() >= calc_gdal_version_num(2, 0, 0):
                    first = next(iter(src))
                    self.assertEqual(first['properties'][fieldname], a_bigint)
Example #31
    def getName(self):
        return (self.name)

    def setNhood(self, nh):
        self.nhood = nh

    def getNhood(self):
        return (self.nhood)


# read all of the rail stops data into an array of Points

rstops = []

with fiona.open("newgisdata/tm_rail_stops.shp") as input:
    for item in input:
        rstops.append(RailStop(item))

print("found ", len(rstops), " trimet rail stops")

mstops = list(filter(lambda rs: RailStop.isMAXstop(rs), rstops))

print("found ", len(mstops), " MAX stops")

# define a class to store information about neighborhoods


class Neighborhood:

    name = "Unknown"
Example #32
__author__ = 'mtenney'
import pymongo
import folium
import vincent
import pandas as pd
import fiona
import datetime
from shapely import geometry
con = pymongo.MongoClient()
db = con.tweets
tweets_toronto = db.tweets_toronto
import os
os.chdir('../')
poly = fiona.open(r'D:\data\open_toronto\NEIGHBORHOODS_WGS84.shp')
from collections import Counter, OrderedDict
boros = {}

c = 0
for rex in poly:
    p = geometry.shape(rex['geometry'])
    xy = list(zip(p.boundary.xy[1], p.boundary.xy[0]))
    res = list(tweets_toronto.find({'geo.coordinates': {"$geoWithin": {"$polygon":xy}}}))
    boros[rex['properties']['AREA_NAME']] = {'tweets':res, 'times':[datetime.datetime.strptime(tweet['created_at'],
                                                                '%a %b %d %H:%M:%S +0000 %Y')for tweet in res]}
    c+=1


poly = fiona.open(r'D:\data\open_toronto\NEIGHBORHOODS_WGS84.shp')
map = folium.Map([43.5,-79.37],width=1250,height=900,tiles='Stamen Toner')

c = 0
Example #33
    def to_shp(self, out_filename, driver='ESRI Shapefile', epsg=4326):
        """
        Write ICESat data to shapefile.

        :param out_filename: Filename (optionally with path) of file to read out.
        :param driver: Name of driver fiona should use to create the output file. Default is 'ESRI Shapefile'.
        :param epsg: EPSG code to project data to. Default is 4326, WGS84 Lat/Lon.
        :type out_filename: str
        :type driver: str
        :type epsg: int

        Example:

        >>> donjek_icesat.to_shp('donjek_icesat.shp', epsg=3338)

        will write donjek_icesat to a shapefile in Alaska Albers projection (EPSG:3338)
        """
        # skip the lat, lon columns in the h5data
        # get the columns we're writing out
        props = []
        prop_inds = []
        data_names = [d.split('/')[-1] for d in self.data_names]

        for i, d in enumerate(data_names):
            props.append([d.rsplit(str(i), 1)[0], 'float'])
            prop_inds.append(i)
        lat_key = find_keyname(data_names, 'lat', 'first')
        lat_ind = self.h5data.attrs.get(lat_key)[0]
        lon_key = find_keyname(data_names, 'lon', 'first')
        lon_ind = self.h5data.attrs.get(lon_key)[0]
        prop_inds.remove(lat_ind)
        prop_inds.remove(lon_ind)

        props = OrderedDict(props)
        del props['d_lat'], props['d_lon']

        schema = {'properties': props, 'geometry': 'Point'}

        outfile = fiona.open(out_filename,
                             'w',
                             crs=fiona.crs.from_epsg(epsg),
                             driver=driver,
                             schema=schema)
        lat = self.lat
        lon = self.lon

        if epsg != 4326:
            dest_proj = pyproj.Proj(init='epsg:{}'.format(epsg))
            x, y = pyproj.transform(pyproj.Proj(init='epsg:4326'), dest_proj,
                                    lon, lat)
            pts = zip(x, y)
        else:
            # fiona expects (x, y) == (lon, lat) ordering
            pts = zip(lon, lat)

        for i, pt in enumerate(pts):
            this_data = self.h5data[prop_inds, i]
            out_data = OrderedDict(zip(props.keys(), this_data))
            point = Point(pt)
            outfile.write({'properties': out_data, 'geometry': mapping(point)})

        outfile.close()
Example #34
def djangoToExportFormat(request,
                         filter_object,
                         properties_list=None,
                         geom_col="geom",
                         format="geojson"):
    """Convert a GeoDjango QuerySet to a GeoJSON Object"""

    #Workaround for mutable default value
    if properties_list is None:
        properties_list = []
        #Return dictionary of key value pairs
        filter_dict = filter_object[0].__dict__
        #Remove bunk fields
        for d in filter_dict:
            if isinstance(filter_dict[d], django.db.models.base.ModelState):
                pass
            # Convert decimal to float
            elif isinstance(filter_dict[d], Decimal):
                for obj in filter_object:
                    setattr(obj, d, float(obj.__dict__[d]))
                properties_list.append(d)
            # Convert date to string
            elif isinstance(filter_dict[d], date):
                for obj in filter_object:
                    setattr(obj, d, str(obj.__dict__[d]))
                properties_list.append(d)
            # Convert time to string
            elif isinstance(filter_dict[d], time):
                for obj in filter_object:
                    setattr(obj, d, str(obj.__dict__[d]))
                properties_list.append(d)
            else:
                properties_list.append(d)

        properties_list.remove(geom_col)

    queryset = filter_object
    djf = Django.Django(geodjango=geom_col, properties=properties_list)
    decode_djf = djf.decode(queryset)
    if format.lower() == 'geojson':
        geoj = GeoJSON.GeoJSON()
        # Pretty Print using JSON dumps method. Note requires setting
        # vectorformats encode method to_string param to False.
        geom_out = dumps(geoj.encode(decode_djf, to_string=False),
                         indent=4,
                         separators=(',', ': '))
        response = HttpResponse(geom_out, content_type="text/plain")
    elif format.lower() == 'kml':
        # title property can be passed as a keyword arg.
        # See vectorformats kml.py
        kml = KML.KML(title_property='name')
        geom_out = kml.encode(decode_djf)
        response = HttpResponse(
            geom_out, content_type="application/vnd.google-earth.kml+xml")
        response['Content-Disposition'] = 'attachment; filename="kml_out.kml"'
    elif format.lower() == 'shp':
        # convert to GeoJSON, then Use Fiona to Create a Shapefile.
        geoj = GeoJSON.GeoJSON()
        geoJSON = dumps(geoj.encode(decode_djf, to_string=False),
                        indent=4,
                        separators=(',', ': '))

        # Hard source properties for the destination shapefile.
        # These will be passed to Fiona.
        shp_driver = 'ESRI Shapefile'
        shp_crs = {
            'no_defs': True,
            'ellps': 'WGS84',
            'datum': 'WGS84',
            'proj': 'longlat'
        }
        shp_schema = {
            'geometry': decode_djf[0].geometry['type'],
            'properties': {
                'addDate': 'str',
                'collectDate': 'str',
                'collectionMethod': 'str',
                'comment': 'str',
                'featurePurpose': 'str',
                'group': 'str',
                'name': 'str',
                'updateDate': 'str'
            }
        }

        upload_dir = make_temp_dir()
        zipdir = os.path.join(upload_dir, decode_djf[0].properties['group'])

        with fiona.open(zipdir,
                        'w',
                        driver=shp_driver,
                        crs=shp_crs,
                        schema=shp_schema) as dest_shp:
            for feature in decode_djf:
                out_feature = {'geometry': {}, 'properties': {}}
                for property in shp_schema['properties']:
                    out_feature['properties'][property] = feature[
                        'properties'][property]
                out_feature['geometry'] = feature['geometry']
                dest_shp.write(out_feature)

        # Create the zip archive
        zip = make_zip_archive(zipdir)
        shp_zip = open(zip.filename, 'rb')
        response = HttpResponse(FileWrapper(shp_zip),
                                content_type='application/zip')
        response['Content-Disposition'] = 'attachment; filename=shp_out.zip'
        response['Content-Length'] = os.path.getsize(zip.filename)

    else:
        raise ValueError
    return response
Example #35
import os.path
from csv import reader
import pyproj
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.point import Point
import fiona
import dicts

STATEPLANES = []

with fiona.open(os.path.join(os.path.dirname(__file__),
                             'data/stateplane.shp')) as src:
    for f in src:
        if f['geometry']['type'] == 'MultiPolygon':
            f['geometry'] = MultiPolygon(
                [Polygon(c[0], c[1:]) for c in f['geometry']['coordinates']])

        elif f['geometry']['type'] == 'Polygon':
            f['geometry'] = Polygon(f['geometry']['coordinates'][0],
                                    f['geometry']['coordinates'][1:])

        STATEPLANES.append(f)

COFIPS = dict()


def _cofips():
    global COFIPS
    with open(os.path.join(os.path.dirname(__file__),
                           'data/countyfp.csv')) as rs:
Example #36
File: geepy.py Project: whigg/geepy
def get_epsg(shp):
    with fiona.open(shp) as src:
        epsg = src.crs['init']
        pos = 5  # length of the 'epsg:' prefix
        epsg_num = epsg[pos:]
    return epsg_num
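The fixed offset assumes the value is always of the form 'epsg:NNNN'. A slightly more defensive sketch (still assuming the older dict-style CRS with an 'init' key) splits on the colon instead:

def get_epsg_robust(shp):
    with fiona.open(shp) as src:
        # e.g. 'epsg:4326' -> '4326'
        return src.crs['init'].split(':')[-1]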
Example #37
    def make_empty(self):
        if not os.path.isfile(self.name[-1]):
            print(f"Making shapefile ({self.name[-1]})")
            sf = fiona.open(self.name[-1], 'w', 'ESRI Shapefile', self.schema, crs=self.crs)
            sf.close()
Example #38
def get_wkt_from_shapefile(shapefile_path):
    '''returns the WKT string of the first geometry in the input shapefile'''
    with fiona.open(shapefile_path) as c:
        collection = [shapely.geometry.shape(item['geometry']) for item in c]
    return [j.wkt for j in collection][0]
Example #39
from shapely.geometry import mapping, shape, Point

# This shapefile was made by intersecting all non-bridge motorway_links with non-motorways (surface streets).
# It's imperative that we do all line/point intersections in wgs84.
# Reprojecting can cause tiny errors that will cause point intersections to
# miss.
shapefile_dir = "/Users/mkirk/src/collision/visualizations/exit_ramps/layers/"
freeway_ramps_shapefile_name = 'non-bridge_freeway_ramps'
freeway_ramps_shapefile_path = os.path.join(
    shapefile_dir, freeway_ramps_shapefile_name,
    freeway_ramps_shapefile_name + ".shp")

print("processing {}.".format(freeway_ramps_shapefile_name))

with fiona.drivers():
    with fiona.open(freeway_ramps_shapefile_path, 'r') as source:

        output_meta = source.meta.copy()
        output_meta['schema']['geometry'] = 'Point'
        with fiona.open('freeway_ramp_start_points.shp', 'w',
                        **output_meta) as sink:
            for feature in source:
                first_point = Point(feature['geometry']['coordinates'][0])
                feature['geometry'] = mapping(first_point)
                sink.write(feature)

### This was a different attempt
##
## # We want to separate the on-ramps from the off-ramps.
## # We can do this by determing if the ramp moves towards a surface street or away from a surface street.
## # However, some "ramp" features represent both an off and an on-ramp, e.g. when
Example #40
# Convert coords to point shapefile
# Temporary point shapefile prefix
tpoints = odir + chulldir + os.sep + aname + '/temp_point_'
ptsname = tpoints + aname + '.shp'
hfu.coordinates2point(tuplist,ptsname,srs)
# Make buffered convex hull for point set
och1 = odir + chulldir + os.sep + aname + '/' + 'chull_' + aname + '.shp'
hfu.convexhull(ptsname, och1, buff=1000)
# Get patch 1 -----------
g1 = int(aname.split('_')[0])
pid1.append(g1) # append to list
# Use fiona and geopandas to dissolve features if there are multiple ones with the same gridcode
t1 = gpd.read_file(patches) # get projection info
myproj = t1.crs
t1 = None
reader = fiona.open(patches)
xx = gpd.GeoDataFrame.from_features((x for x in reader if x['properties']['GRIDCODE']==g1))
if xx.shape[0] > 1:
    xx = xx.dissolve(by='GRIDCODE', aggfunc='sum')
xx.crs = myproj
xx['GRIDCODE'] = g1
# Fix simple self intersections if necessary
if not xx.is_valid.bool():
    xx = xx.buffer(0)
# Write polygon to shapefile
dissoshape1 = odir + chulldir + os.sep + aname + '/patch1_temp_' + str(g1) + '_' + str(j) + '.shp'
xx.to_file(dissoshape1)
reader.close()

# Get patch 2 -----------
g2 = int(aname.split('_')[1])
Example #41
# Create a session
client = BackendApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client)
# Get token for the session
token = oauth.fetch_token(token_url='https://services.sentinel-hub.com/oauth/token',
                          client_id=client_id, client_secret=client_secret)

# All requests using this session will have an access token automatically added
resp = oauth.get("https://services.sentinel-hub.com/oauth/tokeninfo")
print(resp.content)

# open boundary shapefile/json file
site = 'Arisaig'
site_shapefile = '/home/dmilodow/DataStore_DTM/STFC/DATA/EDINAAerial/%s_2017/%s_bbox_wgs84.shp' % (site,site)
for feature in fiona.open(site_shapefile):
    polygon = shape(feature['geometry'])

osm_splitter = OsmSplitter([polygon], CRS.WGS84, zoom_level=8) # Open Street Map Grid
search_bbox = osm_splitter

# define time interval of interest for imagery collection
search_time_interval = ('2019-06-25T00:00:00','2019-07-26T23:59:59')

# for each tile record the desired information
datainfo = pd.DataFrame(columns=['productIdentifier','tilecode','completionDate'])
for t,bbox in enumerate(search_bbox.bbox_list):
    for tile_info in get_area_info(bbox, search_time_interval, maxcc=0.20):
        datainfo = datainfo.append({'productIdentifier': tile_info['properties']['productIdentifier'],
                                    'tilecode' : tile_info['properties']['title'][49:55],
                                    'completionDate': tile_info['properties']['completionDate'][:10],
                                    }, ignore_index=True)
Example #42
        print(" Low score .. stop ", max_score)



    return list_wikidataid + rc_list_wikidataid, max_score


print('- Start Natural-Earth wikidata check - ')

with fiona.open('./natural-earth-vector/10m_cultural/ne_10m_populated_places.shp', 'r') as input:
        i=0
        for pt in input:
            i=i+1

            ne_fid= pt['id']
            ne_lat= str( pt['properties']['LATITUDE']  )
            ne_lon= str( pt['properties']['LONGITUDE'] )
            ne_name= pt['properties']['NAME']
            ne_namealt= pt['properties']['NAMEALT']
            ne_nameascii= pt['properties']['NAMEASCII']
            ne_wikidataid=pt['properties']['wikidataid']
            ne_adm0name=pt['properties']['ADM0NAME']
            ne_adm1name=pt['properties']['ADM1NAME']
            ne_ls_name=pt['properties']['LS_NAME']
            ne_geonameid=str(pt['properties']['GEONAMEID']).split('.')[0]
Example #43
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import fiona
import shapely.geometry

f = open("centroid.csv", "w")
f.write("id,lat,lon\n")

zone_shapes = []
zones = fiona.open("../data/src/Parcels_1598231695808.geojson")
for feature in zones:
    if feature['geometry']:
        centroid = shapely.geometry.shape(feature['geometry']).centroid
        f.write(",".join(
            map(str,
                (feature['properties']['PARCELID'], centroid.y, centroid.x))))
        f.write("\n")

f.close()
Example #44
        # if multiline reader fails process offsetLine as a single line
        vertex = []
        x1off, y1off = offsetLine.xy
        print(len(x1off), "|", end=" ")
        for k in range(0, len(x1off)):
            vertex.append(
                [x1off[k] + shiftX - dX, y1off[k] + shiftY - dY, Depth])
        w.record(Depth)
        w.poly([vertex])
        plt.plot(x1off, y1off, c='r')

    plt.plot(xf, yf, c='lightgrey')
    #  print ""
    sys.stdout.flush()

print("\n<<< Writing perimeter offset >>>\n...", os.path.basename(
    OutFile) + 'shp')
print("... perimeter including islands =", dist, "m")
for s in w.shapes():
    s.shapeType = ShapeType
w.save(OutFile)

# write projection
with fiona.open(InputFile) as fp:
    prj = open(OutFile + "prj", "w")
    prj.write(fp.crs_wkt)
    prj.close()

# plot results
# plt.show()
Example #45
import fiona
import rasterio
import rasterio.mask
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
basepath=r"F:\实验对比分析数据\YellowSea_MBR\\"
with fiona.open(basepath+'MBR2.shp', 'r') as shapefile:
    geometry = [feature["geometry"] for feature in shapefile]

def zonal(geometry,path):
    N_risk=[]
    with rasterio.open(path) as src:
        shapes = []
        for item in geometry:
            shapes.append(item)
            out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)

            out_image=out_image[0]
            out_meta = src.meta
            out_meta.update({
                "driver": "GTiff",
                "height": out_image.shape[0],
                "width": out_image.shape[1],
                "transform": out_transform
            })
            shapes = []
            N_risk.append(np.sum(out_image>0.9)/np.sum(out_image>=0))
    return N_risk

gridPath=r"F:\WORDWIDE_Resample\Merge_predict_ssw\Merge/"
Example #46
import requests
import json
import re
import sched
import shelve
import time
from datetime import datetime

import fiona
import psycopg2

#import eventlet
#eventlet.monkey_patch()

#from flask import Flask
#from flask_socketio import SocketIO

#app = Flask(__name__)
#socket = SocketIO(app, logger=True, engineio_logger=True)

d = shelve.open("/home/nklugman/kplc_app/map/server/db/geolocate_cache")

kenya_constituency = fiona.open(
    "../data/shp/constituencies_simplified.shp")  # Simplified by QGIS
s = sched.scheduler(time.time, time.sleep)

try:
    conn = psycopg2.connect(
        "dbname='capstone' user='******' password='******'")
except Exception:
    print("I am unable to connect to the database.")
now = datetime.now()


def locate_area_to_shpID(area_list):
    id_list = region_define.region_check(area_list, shapefile)
    return id_list

Example #47
    'CA': None,
    'CO': None,
    'CT': None,
    'DE': None,
    'FL': None,
    'GA': None,
    'ID': None,
    'IL': None,
    'MS': None,
    'NJ': None,
    'PA': None,
    'TX': None,
    'WI': None
}

shape_file = fiona.open("../data/tl_2017_us_state.shp")

t = time()
for collection in iter(shape_file):
    #
    # Each collection has four keys ['type', 'id', 'properties', 'geometry']
    state_abbreviation = collection['properties']['STUSPS']

    # Shapely Polygon object
    state_geometry = shape(collection['geometry'])
    if isinstance(state_geometry, Polygon):
        state_geometry = MultiPolygon([state_geometry])

    # State name
    state_name = collection['properties']['NAME']
Example #48
def tmp_worker_job(self, task_index, task_data):

    worker_start_time = int(time.time())

    worker_tagline = "Worker {0} | Task {1} - ".format(self.rank, task_index)
    print(worker_tagline)

    # for each boundary dataset get boundary tracker
    bnd = task_data

    print "\tTracker for: {0}".format(bnd['options']['group'])
    print "\t\tName: {0}".format(bnd["name"])
    print "\t\tActive: {0}".format(bnd["active"])

    # ---------------------------------

    print('\t\tInitializing and populating tracker...')

    if not bnd["options"]["group"] in db_trackers.collection_names():
        c_bnd = db_trackers[bnd["options"]["group"]]
        c_bnd.create_index("name")  #, unique=True)
        c_bnd.create_index([("spatial", pymongo.GEOSPHERE)])
    else:
        c_bnd = db_trackers[bnd["options"]["group"]]

    # ---------------------------------

    # add each non-boundary dataset item to boundary tracker
    # collection with "unprocessed" flag if it is not already
    # in collection
    # (no longer done during ingest)

    for full_dset in dsets:
        dset = {
            'name': full_dset["name"],
            'type': full_dset["type"],
            'spatial': full_dset["spatial"],
            'scale': full_dset["scale"]
        }

        if c_bnd.find_one(dset) is None:
            dset['status'] = -1
            c_bnd.insert_one(dset)

    # ---------------------------------

    worker_tmp_runtime = int(time.time() - worker_start_time)
    print('\t\t\t...worker running for {}m {}s [#1]'.format(
        worker_tmp_runtime // 60, int(worker_tmp_runtime % 60)))

    print('\t\tRunning relevance checks...')

    # lookup unprocessed data in boundary tracker that
    # intersect boundary (first stage search)

    search_status_list = [-1]

    if bnd["scale"] == "global":
        # NOTE: intersect/within at global (ie, >hemispehere)
        # may not work properly. using this as temp workaround
        # could potentially be impacting smaller datasets as well, not sure
        matches = list(c_bnd.find({"status": {"$in": search_status_list}}))
    else:
        matches = list(
            c_bnd.find({
                "status": {
                    "$in": search_status_list
                },
                "$or": [
                    {
                        "spatial": {
                            "$geoIntersects": {
                                "$geometry": bnd["spatial"]
                            }
                        }
                    },
                    # {
                    #     "spatial": {
                    #         "$geoWithin": {
                    #             "$geometry": bnd["spatial"]
                    #         }
                    #     }
                    # },
                    {
                        "scale": "global"
                    }
                ]
            }))

    print('\t\t{0} matches found'.format(len(matches)))

    worker_tmp_runtime = int(time.time() - worker_start_time)
    print('\t\t\t...worker running for {}m {}s [#2]'.format(
        worker_tmp_runtime // 60, int(worker_tmp_runtime % 60)))

    if len(matches) > 0:
        # boundary base and type
        bnd_path = os.path.join(bnd['base'], bnd["resources"][0]["path"])
        bnd_type = bnd['type']

        # bnd_geo = cascaded_union([shape(shp['geometry']) for shp in fiona.open(bnd_base, 'r')])

        with fiona.open(bnd_path, 'r') as bnd_src:
            minx, miny, maxx, maxy = bnd_src.bounds
            total_area = sum([shape(i['geometry']).area for i in bnd_src])

    # for each unprocessed dataset in boundary tracker matched in
    # first stage search (second stage search)
    # search boundary actual vs dataset actual
    for match in matches:
        print "\t\tChecking dataset: {0}".format(match['name'])

        meta_search = list(c_asdf.find({'name': match['name']}))

        if len(meta_search) == 0:
            print('\t\t\tCould not find dataset')
            c_bnd.update_one({"name": match['name']}, {"$set": {
                "status": -3
            }},
                             upsert=False)
            continue

        meta = meta_search[0]

        if "active" in meta and meta["active"] == 0:
            print('\t\t\tDataset inactive')
            c_bnd.update_one({"name": match['name']}, {"$set": {
                "status": -3
            }},
                             upsert=False)
            continue

        # dataset base and type
        test_resource = meta["resources"][0]["path"]
        if test_resource != "":
            dset_path = meta['base'] + "/" + meta["resources"][0]["path"]
        else:
            dset_path = meta['base']

        dset_type = meta['type']

        result = False

        if bnd["scale"] == "global":
            print('\t\t\tGlobal boundary')
            result = True

        elif dset_type == "raster":
            # true extract takes too long and is too costly to run
            # use a simple test of sample points over boundary bounding box
            # to do a good enough check of whether the data is relevant to boundary

            raster_src = rasterio.open(dset_path)

            # index [1] is the pixel width in the GDAL-style geotransform that
            # rasterio 0.x stored in meta; rasterio 1.x exposes an Affine, where
            # the equivalent value is raster_src.transform.a
            pixel_size = raster_src.meta['transform'][1]
            nodata = raster_src.meta['nodata']

            xsize = (maxx - minx) / pixel_size
            ysize = (maxy - miny) / pixel_size

            # -----
            # this section creates the sample of pixels within the extent of
            # the boundary data
            # *** potential flaw: samples fall within the extent, but not
            #     necessarily within the actual boundary. For data such as
            #     islands, which have small areas but cover large extents and
            #     are surrounded by nodata values, this could be an issue

            # minimum ratio of valid pixels required
            valid_sample_thresh = 0.05
            # maximum number of pixels to test
            pixel_limit = 100000

            # init as > than limit to force one run of loop
            sampled_pixel_count = pixel_limit + 1

            # increase step size until sample pixel count is small enough
            s = 1
            while sampled_pixel_count > pixel_limit:
                step_size = pixel_size * s
                predicted_xsize = (maxx - minx) / step_size
                predicted_ysize = (maxy - miny) / step_size
                sampled_pixel_count = predicted_xsize * predicted_ysize
                s += 1

            # -----

            xvals = np.arange(minx, maxx, step_size)
            yvals = np.arange(miny, maxy, step_size)
            samples = list(itertools.product(xvals, yvals))

            values = [val[0] for val in raster_src.sample(samples)]

            raster_src.close()

            clean_values = [i for i in values if i != nodata and i is not None]

            distinct_values = set(clean_values)

            # percent of samples resulting in clean value
            if len(clean_values) > len(samples) * valid_sample_thresh and len(
                    distinct_values) > 1:
                result = True
            else:
                print('\t\t\tPixel check did not pass')

            # else:
            #     # python raster stats extract
            #     extract = rs.gen_zonal_stats(bnd_path, dset_path, stats="min max", limit=200000)

            #     for i in extract:
            #         if i['min'] != None or i['max'] != None:
            #             result = True
            #             break

        elif dset_type == "release":

            # iterate over the active (name prefix, iso3 list) entries in the
            # release_iso3 field of config
            for k, v in config.release_iso3.items():
                if match['name'].startswith(k.lower()):
                    if ("gadm_iso3" in bnd["extras"]
                            and bnd["extras"]["gadm_iso3"].upper()
                            in v) or ("iso3" in bnd["extras"]
                                      and bnd["extras"]["iso3"].upper() in v):
                        result = True

                    elif "global" in v:

                        bnd_coords = bnd['spatial']['coordinates']

                        bnd_minx = bnd_coords[0][0][0]
                        bnd_miny = bnd_coords[0][1][1]
                        bnd_maxx = bnd_coords[0][2][0]
                        bnd_maxy = bnd_coords[0][0][1]

                        loc_count = db_releases[match['name']].count({
                            'locations.longitude': {
                                '$gte': bnd_minx,
                                '$lte': bnd_maxx
                            },
                            'locations.latitude': {
                                '$gte': bnd_miny,
                                '$lte': bnd_maxy
                            }
                        })

                        print "\t\t\t{0} locations found".format(loc_count)
                        if loc_count > 0:
                            result = True

        # elif dset_type == "polydata":

        #   # shapely intersect
        #   bnd_geo = cascaded_union(
        #       [shape(shp) for shp in shapefile.Reader(bnd_path).shapes()])
        #   dset_geo = cascaded_union(
        #       [shape(shp) for shp in shapefile.Reader(dset_path).shapes()])

        #   intersect = bnd_geo.intersects(dset_geo)

        #   if intersect == True:
        #       result = True

        else:
            print("\t\tError - Dataset type not yet supported (skipping)")
            c_bnd.update_one({"name": match['name']}, {"$set": {
                "status": -2
            }},
                             upsert=False)
            continue

        print('\t\t\tactive: {0}'.format(result))

        # check results and update tracker
        new_status = 1 if result else 0
        c_bnd.update_one({"name": match['name']},
                         {"$set": {"status": new_status}},
                         upsert=False)

        # run third stage search on second stage matches
        # request actual vs dataset actual
        # may only be needed for user point input files
        #

        # update tracker for third stage search
        #

    worker_tmp_runtime = int(time.time() - worker_start_time)
    print('\t\t\t...worker running for {}m {}s [#3]'.format(
        worker_tmp_runtime // 60, int(worker_tmp_runtime % 60)))

    # update tracker for all unprocessed dataset not matching first
    # stage search
    c_bnd.update_many({"status": -1}, {"$set": {"status": 0}}, upsert=False)

    # reset all inactive from placeholder status (-3) to unprocessed (-1)
    # so that their active state will be rechecked in case it changes
    #
    # Warning: datasets that have already been processed which are now inactive
    #          will be left alone. Applications should do their own checks on
    #          the active field.
    #
    # Note: As it relates to this script, we must assume that
    #       a dataset is inactive because there is an error that may prohibit
    #       it being properly indexed, so it is continually left out until
    #       it is removed from the data collection or set to active and indexed.
    c_bnd.update_many({"status": -3}, {"$set": {"status": -1}}, upsert=False)

    worker_tmp_runtime = int(time.time() - worker_start_time)
    print('\t\t\t...worker running for {}m {}s [#4]'.format(
        worker_tmp_runtime // 60, int(worker_tmp_runtime % 60)))

    return bnd["name"]
Ejemplo n.º 49
0
def DisaggregateTileIntoSwaths(row, col, params, **kwargs):
    """
    see CarveLongitudinalSwaths
    """

    # tileset = config.tileset()
    # # height_raster = tileset.tilename('ax_flow_height', axis=axis, row=row, col=col)

    # def _tilename(dataset):
    #     return tileset.tilename(
    #         dataset,
    #         row=row,
    #         col=col)

    refaxis_shapefile = params.reference.filename(tileset=None)
    # config.filename(params.ax_reference)
    mask_raster = params.mask.tilename(row=row, col=col)
    # _tilename(params.ax_mask)

    output_distance = params.output_distance.tilename(row=row, col=col)
    # _tilename(params.output_distance)
    output_measure = params.output_measure.tilename(row=row, col=col)
    # _tilename(params.output_measure)
    output_nearest = params.output_nearest.tilename(row=row, col=col)
    # _tilename(params.output_nearest)
    output_swaths_raster = params.output_swaths_raster.tilename(row=row, col=col)
    # _tilename(params.output_swaths_raster)

    mdelta = params.mdelta

    if not os.path.exists(mask_raster):
        return {}

    with rio.open(mask_raster) as ds:

        # click.echo('Read Valley Bottom')

        # valley_bottom = speedup.raster_buffer(ds.read(1), ds.nodata, 6.0)
        mask = ds.read(1)
        height, width = mask.shape

        # distance = np.full_like(valley_bottom, ds.nodata)
        # measure = np.copy(distance)
        refaxis_pixels = list()

        # click.echo('Map Stream Network')

        def accept(i, j):
            return all([i >= -height, i < 2*height, j >= -width, j < 2*width])

        coord = itemgetter(0, 1)

        mmin = float('inf')
        mmax = float('-inf')

        with fiona.open(refaxis_shapefile) as fs:
            for feature in fs:

                axis = feature['properties']['AXIS']
                m0 = feature['properties'].get('M0', 0.0)
                length = asShape(feature['geometry']).length

                if m0 < mmin:
                    mmin = m0

                if m0 + length > mmax:
                    mmax = m0 + length

                coordinates = np.array([
                    coord(p) + (m0,) for p in reversed(feature['geometry']['coordinates'])
                ], dtype='float32')

                coordinates[1:, 2] = m0 + np.cumsum(np.linalg.norm(
                    coordinates[1:, :2] - coordinates[:-1, :2],
                    axis=1))

                coordinates[:, :2] = ta.worldtopixel(coordinates[:, :2], ds.transform, gdal=False)

                for a, b in zip(coordinates[:-1], coordinates[1:]):
                    for i, j, m in rasterize_linestringz(a, b):
                        if accept(i, j):
                            # distance[i, j] = 0
                            # measure[i, j] = m
                            refaxis_pixels.append((i, j, m, axis))

        # ta.shortest_distance(axr, ds.nodata, startval=1, distance=distance, feedback=ta.ConsoleFeedback())
        # ta.shortest_ref(axr, ds.nodata, startval=1, fillval=0, out=measure, feedback=ta.ConsoleFeedback())

        if not refaxis_pixels:
            return []

        mmin = math.floor(mmin / mdelta) * mdelta
        mmax = math.ceil(mmax / mdelta) * mdelta
        breaks = np.arange(mmin, mmax + mdelta, mdelta)

        # click.echo('Calculate Measure & Distance Raster')

        # Option 1, shortest distance

        # speedup.shortest_value(valley_bottom, measure, ds.nodata, distance, 1000.0)
        # distance = 5.0 * distance
        # distance[valley_bottom == ds.nodata] = ds.nodata
        # measure[valley_bottom == ds.nodata] = ds.nodata
        # Add 5.0 m x 10 pixels = 50.0 m buffer
        # distance2 = np.zeros_like(valley_bottom)
        # speedup.shortest_value(domain, measure, ds.nodata, distance2, 10.0)

        # Option 2, nearest using KD Tree

        nearest, measure, distance = nearest_value_and_distance(
            np.flip(np.array(refaxis_pixels), axis=0),
            np.float32(mask),
            ds.nodata)

        nodata = -99999.0
        distance = 5.0 * distance
        distance[mask == ds.nodata] = nodata
        measure[mask == ds.nodata] = nodata

        # click.echo('Write output')

        profile = ds.profile.copy()
        profile.update(compress='deflate', dtype='float32', nodata=nodata)

        with rio.open(output_distance, 'w', **profile) as dst:
            dst.write(distance, 1)

        with rio.open(output_measure, 'w', **profile) as dst:
            dst.write(measure, 1)

        profile.update(dtype='uint32', nodata=0)

        with rio.open(output_nearest, 'w', **profile) as dst:
            dst.write(nearest, 1)

        # click.echo('Create DGOs')

        dgo = np.zeros_like(measure, dtype='uint32')
        attrs = dict()

        for axis in np.unique(nearest):

            if axis == 0:
                continue

            ax_mask = (nearest == axis)
            ax_measure = measure[ax_mask]
            
            # mmin = math.floor(np.min(ax_measure) / mdelta) * mdelta
            # mmax = math.ceil(np.max(ax_measure) / mdelta) * mdelta
            # breaks = np.arange(mmin, mmax + mdelta, mdelta)

            dgo[ax_mask] = np.uint32(np.digitize(ax_measure, breaks))

            def calculate_attrs():

                measures = np.round(0.5 * (breaks + np.roll(breaks, 1)), 1)
                ax_dgo = np.zeros_like(dgo)
                ax_dgo[ax_mask] = dgo[ax_mask]
                boxes = speedup.flat_boxes(ax_dgo)

                if not boxes:
                    return dict()

                maximinj = itemgetter(2, 1)
                minimaxj = itemgetter(0, 3)

                lowerleft = fct.pixeltoworld(np.array([
                    maximinj(box) for box in boxes.values()
                ], dtype='int32'), ds.transform)

                upperright = fct.pixeltoworld(np.array([
                    minimaxj(box) for box in boxes.values()
                ], dtype='int32'), ds.transform)

                bounds = np.column_stack([lowerleft, upperright])

                return {
                    (axis, swath): (measures[swath], bounds[k])
                    for k, swath in enumerate(boxes)
                    if swath > 0
                }

            attrs.update(calculate_attrs())

        profile.update(nodata=0, dtype='uint32')

        with rio.open(output_swaths_raster, 'w', **profile) as dst:
            dst.write(dgo, 1)

        return attrs
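
# How the swath binning above works in isolation: measures along the reference
# axis are cut into mdelta-wide bins whose shared edges are `breaks`
# (illustrative values only).
import numpy as np

mdelta = 200.0
measures = np.array([35.0, 180.0, 450.0, 999.0])
mmin = np.floor(measures.min() / mdelta) * mdelta
mmax = np.ceil(measures.max() / mdelta) * mdelta
breaks = np.arange(mmin, mmax + mdelta, mdelta)  # [0., 200., ..., 1000.]
swath_ids = np.digitize(measures, breaks)        # array([1, 1, 3, 5])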
Ejemplo n.º 50
0
def calculate_interuptions(gvi_points, shp_corridors, outshp, dist):

    import fiona
    from fiona import crs
    from shapely.geometry import shape, mapping
    from shapely.ops import substring
    from shapely.ops import transform
    from pyproj import Transformer, CRS
    from statistics import mean

    with fiona.open(gvi_points, 'r', encoding='UTF-8') as points:
        with fiona.open(shp_corridors, 'r', encoding='UTF-8') as corridors:
            wgs84 = CRS('EPSG:4326')
            pseudo_mercator = CRS('EPSG:3857')
            projection1 = Transformer.from_crs(wgs84,
                                               pseudo_mercator,
                                               always_xy=True).transform
            projection2 = Transformer.from_crs(pseudo_mercator,
                                               wgs84,
                                               always_xy=True).transform

            points_lst = []
            for point in points:
                points_lst.append([
                    shape(point['geometry']), point['properties']['greenView']
                ])

            corridors_lst = []
            for corridor in corridors:
                corridors_lst.append(
                    transform(projection1, shape(corridor['geometry'])))

            shp_schema = {
                'geometry': 'Polygon',
                'properties': {
                    'gvi': 'float',
                    'greening': 'int'
                }
            }
            with fiona.open(outshp,
                            'w',
                            encoding='UTF-8',
                            schema=shp_schema,
                            driver='ESRI Shapefile',
                            crs=crs.from_epsg(4326)) as corridor_interuptions:
                for corridor in corridors_lst:
                    buffer_gvi = []
                    min_dist = 0

                    for max_dist in range(dist,
                                          int(corridor.length) + dist, dist):
                        buffer_zone = transform(
                            projection2,
                            substring(corridor, min_dist,
                                      max_dist).buffer(30, cap_style=2))

                        gvi_values = []
                        for point, gvi in points_lst:
                            if point.within(buffer_zone):
                                gvi_values.append(gvi)

                        if len(gvi_values) == 0:
                            min_dist += dist
                            continue
                        buffer_gvi.append([buffer_zone, mean(gvi_values)])
                        min_dist += dist

                    gvi_lst = []
                    greening = 0
                    for idx, (buffer, gvi) in enumerate(buffer_gvi):
                        new_buffer = {}
                        gvi_lst.append(gvi)
                        new_buffer['geometry'] = mapping(buffer)
                        if idx < 2:
                            new_buffer['properties'] = {
                                'gvi': gvi,
                                'greening': greening
                            }
                            corridor_interuptions.write(new_buffer)
                        else:
                            if gvi < 0.7 * mean(gvi_lst[idx - 2:idx]):
                                greening = 1
                            elif gvi > 1.3 * mean(gvi_lst[idx - 2:idx]):
                                greening = 0
                            new_buffer['properties'] = {
                                'gvi': gvi,
                                'greening': greening
                            }
                            corridor_interuptions.write(new_buffer)
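
# Hypothetical invocation: the paths are placeholders, and dist is the
# along-corridor sampling step in metres (corridors are buffered in EPSG:3857).
calculate_interuptions('gvi_points.shp', 'corridors.shp',
                       'corridor_interuptions.shp', dist=100)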
Ejemplo n.º 51
0
mapbox_access_token = "pk.eyJ1IjoiYmd6LWRhbmllbCIsImEiOiJjandxZTFibjkxOWEyNGJsZWRiZ253OXBoIn0.vpfoIUoYkhjpn42Eb13YCg"

DANIEL_POIS = "/home/daniel/Daniel/GDV_project/pois+road_data/gis_osm_pois_free_1.shp"
DANIEL_PROPERTIES = "/home/daniel/Daniel/GDV_project/wohnungsliste_19.05.2019.csv"
DANIEL_COORDINATES = "/home/daniel/Daniel/GDV_project/wh_latlng(2).csv"

MARK_POIS = "C:/Users/MarkBeckmann/PycharmProjects/gdvss2019/dashboard/pois+road_data/gis_osm_pois_free_1.shp"
MARK_PROPERTIES = "C:/Users/MarkBeckmann/PycharmProjects/gdvss2019/dashboard/wohnungsliste_19.05.2019.csv"
MARK_COODRINATES = "C:/Users/MarkBeckmann/PycharmProjects/gdvss2019/dashboard/wh_latlng(2).csv"

scl = [ [0,"rgb(5, 10, 172)"],[0.35,"rgb(40, 60, 190)"],[0.5,"rgb(70, 100, 245)"],\
    [0.6,"rgb(90, 120, 245)"],[0.7,"rgb(106, 137, 247)"],[1,"rgb(220, 220, 220)"] ]

mannheim = (8.2693, 49.3830, 8.6850, 49.5753)
with fiona.open(DANIEL_POIS) as src:
    features = list(src.filter(bbox=mannheim))
    pointsofinterest = pandas.DataFrame(
        {"id": [f["id"] for f in features],
         "longitude": [f["geometry"]["coordinates"][0] for f in features],
         "latitude": [f["geometry"]["coordinates"][1] for f in features],
         "fclass": [f["properties"]["fclass"] for f in features],
         "name": [f["properties"]["name"] for f in features]
        })
pointsofinterest["text"] = pointsofinterest["fclass"]
pointsofinterest = pointsofinterest.set_index(["fclass"])
education = pointsofinterest.loc[pointsofinterest.index.isin(["university", "school", "kindergarten", "college"])]
health = pointsofinterest.loc[pointsofinterest.index.isin(["pharmacy", "hospital", "doctors", "dentist", "veterinary"])]
leisure = pointsofinterest.loc[pointsofinterest.index.isin(["theatre", "nightclub", "cinema", "park", "playground", "dog_park", "sports_centre", "pitch", "swimming_pool", "tennis_court", "golf_course", "stadium"])]
public = pointsofinterest.loc[pointsofinterest.index.isin(["police", "fire_station", "post_box", "post_office", "library", "town_hall", "courthouse", "community_centre", "nursing_home", "arts_centre", "market_place"])]
catering = pointsofinterest.loc[pointsofinterest.index.isin(["restaurant", "fast_food", "cafe", "pub", "bar", "food_court", "biergarten"])]
Ejemplo n.º 52
0
def VectorizeSwathPolygons(params, processes=1, **kwargs):
    """
    Vectorize spatial units' polygons
    """

    # parameters = ValleyBottomParameters()
    # parameters.update({key: kwargs[key] for key in kwargs.keys() & parameters.keys()})
    # kwargs = {key: kwargs[key] for key in kwargs.keys() - parameters.keys()}
    # params = SwathMeasurementParams(**parameters)

    defs = ReadSwathsBounds(params)

    def arguments():

        for (axis, gid), (measure, bounds) in defs.items():
            yield (
                VectorizeOneSwathPolygon,
                axis,
                gid,
                measure,
                bounds,
                params,
                kwargs
            )

    output = params.output_swaths_shapefile.filename(tileset=None)
    # config.filename(params.output_swaths_shapefile, mod=False)

    schema = {
        'geometry': 'Polygon',
        'properties': [
            ('GID', 'int'),
            ('AXIS', 'int:4'),
            ('VALUE', 'int:4'),
            # ('ROW', 'int:3'),
            # ('COL', 'int:3'),
            ('M', 'float:10.2')
        ]
    }
    crs = fiona.crs.from_epsg(2154)
    options = dict(driver='ESRI Shapefile', crs=crs, schema=schema)

    with fiona.open(output, 'w', **options) as dst:

        with Pool(processes=processes) as pool:

            pooled = pool.imap_unordered(starcall, arguments())

            with click.progressbar(pooled, length=len(defs)) as iterator:
                for axis, gid, measure, polygons in iterator:
                    for (polygon, value) in polygons:

                        geom = asShape(polygon)
                        exterior = Polygon(geom.exterior).buffer(0)

                        feature = {
                            'geometry': exterior.__geo_interface__,
                            'properties': {
                                'GID': int(gid),
                                'AXIS': int(axis),
                                'VALUE': int(value),
                                # 'ROW': row,
                                # 'COL': col,
                                'M': float(measure)
                            }
                        }

                        dst.write(feature)

                        for ring in geom.interiors:

                            if not exterior.contains(ring):

                                feature = {
                                    'geometry': Polygon(ring).buffer(0).__geo_interface__,
                                    'properties': {
                                        'GID': int(gid),
                                        'AXIS': int(axis),
                                        'VALUE': int(value),
                                        # 'ROW': row,
                                        # 'COL': col,
                                        'M': float(measure)
                                    }
                                }

                                dst.write(feature)
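
# `starcall` is not shown in this excerpt; a common definition, assuming each
# tuple yielded by arguments() is (func, *args, kwargs_dict):
def starcall(args):
    func = args[0]
    return func(*args[1:-1], **args[-1])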
Ejemplo n.º 53
0
 def setUp(self):
     vfs = 'zip://{}'.format(os.path.abspath(self.path_coutwildrnp_zip))
     self.c = fiona.open("/coutwildrnp.shp", "r", vfs=vfs)
Ejemplo n.º 54
0
#     with open("D:/Wandelroutes/Text/routes_{}.txt".format(index), "wb") as file:
#         file.write(response.content)
#     data = json.loads(response.content)
#     print("Index / routes count / total routes: ", index, "/", len(data['result']['routes']), "/", data['result']['total'])
#
#     for route in data['result']['routes']:
#         time.sleep(0.5)
#         route_url = "https://download.routeyou.com/k-9aec2fc1705896b901c3ea17d6223f0a/route/{}.gpx?language=nl".format(route['id'])
#         filepath = "D:/Wandelroutes/GPX/{}.gpx".format(route['id'])
#         download_to_file(route_url, default_headers, filepath)

dir_filepath = "D:/Wandelroutes/GPX"
filenames = os.listdir(dir_filepath)
rows_list = []
for filename in filenames:
    layer = fiona.open(os.path.join(dir_filepath, filename), layer='tracks')
    geom = layer[0]
    route_name = geom['properties']['name']
    route_geodata = {
        'type': 'MultiLineString',
        'coordinates': geom['geometry']['coordinates']
    }
    route_geometry = shape(route_geodata)
    route_id = os.path.splitext(os.path.basename(filename))[0]
    route_dict = {
        'id': str(route_id),
        'name': route_name,
        'url': "https://www.routeyou.com/nl-nl/route/view/" + str(route_id),
        'geometry': route_geometry
    }
    rows_list.append(route_dict)
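
# A possible follow-up, assuming geopandas is available: collect the rows into
# a GeoDataFrame in WGS84 and export them in one step.
import geopandas as gpd

routes = gpd.GeoDataFrame(rows_list, geometry='geometry', crs='EPSG:4326')
routes.to_file("D:/Wandelroutes/routes.gpkg", driver='GPKG')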
Ejemplo n.º 55
0
 def setUp(self):
     self.c = fiona.open("zip://{}".format(self.path_coutwildrnp_zip, "r"))
     self.path = os.path.join(self.data_dir, 'coutwildrnp.zip')
Ejemplo n.º 56
0
def create_vector_mrf(input_file_path,
                      output_path,
                      mrf_prefix,
                      layer_name,
                      target_x,
                      target_y,
                      target_extents,
                      tile_size,
                      overview_levels,
                      projection_str,
                      filter_list,
                      feature_reduce_rate=2.5,
                      cluster_reduce_rate=2,
                      debug=False):
    """
    Creates a MVT MRF stack using the specified TileMatrixSet.

    NOTE: Vectors don't have a concept of pixels. We're only using pixel dimensions as a way of expressing the proportional size of the tiles and to match them up with
    raster tilesets.

    Args:
        input_file_path (str) -- Path to the vector datafile to be used. Accepts GeoJSON and Shapefiles
        output_path (str) -- Path to where the output MRF files should be stored.
        mrf_prefix (str) -- Prefix for the MRF filenames that will be generated.
        layer_name (str) -- Name for the layer to be packed into the tile. Only single layers currently supported.
        target_x (int) -- Pixel width of the highest zoom level.
        target_y (int) -- Pixel height of the highest zoom level.
        target_extents (list float) -- The bounding box for the chosen projection in map units.
        tile_size (int) -- Pixel size of the tiles to be generated.
        overview_levels (list int) -- A list of the overview levels to be used (i.e., a level of 2 will render a level that's 1/2 the width and height of the base level)
        projection_str (str) -- EPSG code for the projection to be used.
        filter_list (list object) -- List of options for filtering features
        feature_reduce_rate (float) -- (currently only for Point data) Rate at which to reduce features for each successive zoom level.
            Defaults to 2.5 (1 feature retained for every 2.5 in the previous zoom level)
        cluster_reduce_rate (float) -- (currently only for Point data) Rate at which to reduce points in clusters of 1px or less.
            Default is 2 (retain the square root of the total points in the cluster).
        debug (bool) -- Toggle verbose output messages and MVT file artifacts (MVT tile files will be created in addition to MRF)
    """
    # Get projection and calculate overview levels if necessary
    proj = osr.SpatialReference()
    proj.ImportFromEPSG(int(projection_str.split(':')[1]))
    if not target_y:
        target_y = (target_x / 2) if proj.IsGeographic() else target_x
    if not overview_levels:
        overview_levels = [2]
        exp = 2
        while (overview_levels[-1] * tile_size) < target_x:
            overview_levels.append(2**exp)
            exp += 1

    tile_matrices = get_tms(target_x, target_y, target_extents, tile_size,
                            overview_levels, proj)

    # Open MRF data and index files and generate the MRF XML
    fidx = open(os.path.join(output_path, mrf_prefix + '.idx'), 'wb+')
    fout = open(os.path.join(output_path, mrf_prefix + '.pvt'), 'wb+')
    notile = struct.pack('!QQ', 0, 0)
    pvt_offset = 0

    mrf_dom = build_mrf_dom(tile_matrices, target_extents, tile_size, proj)
    with open(os.path.join(output_path, mrf_prefix) + '.mrf', 'w+') as f:
        f.write(mrf_dom.toprettyxml())

    spatial_dbs = []
    source_schemas = []
    # Dump contents of shapefile into a mutable rtree spatial database for faster searching.
    for input_file in input_file_path:
        print('Processing ' + input_file)
        with fiona.open(input_file) as shapefile:
            try:
                spatial_db = rtree.index.Index(
                    rtree_index_generator(list(shapefile), filter_list))
            except rtree.core.RTreeError as e:
                print('ERROR -- problem importing feature data. If you have '
                      'filters configured, the source dataset may have no '
                      'features that pass. Err: {0}'.format(e))
                sys.exit()
            spatial_dbs.append(spatial_db)
            source_schema = shapefile.schema['geometry']
            source_schemas.append(source_schema)
            if debug:
                print('Points to process: ' +
                      str(spatial_db.count(spatial_db.bounds)))

    # Build tilematrix pyramid from the bottom (highest zoom) up. We generate tiles left-right, top-bottom and write them
    # successively to the MRF.
    for i, tile_matrix in enumerate(reversed(tile_matrices)):
        z = len(tile_matrices) - i - 1

        for idx, spatial_db in enumerate(spatial_dbs):
            # We do general point rate reduction randomly, deleting those items from the
            # spatial index. The highest zoom level is never reduced.
            if source_schemas[
                    idx] == 'Point' and feature_reduce_rate and z != len(
                        tile_matrices) - 1:
                feature_count = spatial_dbs[idx].count(spatial_dbs[idx].bounds)
                num_points_to_delete = int(feature_count -
                                           math.floor(feature_count /
                                                      feature_reduce_rate))
                if debug:
                    print('Deleting ' + str(num_points_to_delete) +
                          ' points from dataset')
                for feature in random.sample([
                        feature for feature in spatial_dbs[idx].intersection(
                            spatial_dbs[idx].bounds, objects=True)
                ], num_points_to_delete):
                    spatial_dbs[idx].delete(feature.id, feature.bbox)

            # Here we're culling points that are less than a pixel away from each other. We use a queue to keep track of them and avoid looking at points twice.
            if source_schemas[
                    idx] == 'Point' and cluster_reduce_rate and z != len(
                        tile_matrices) - 1:
                feature_queue = [
                    item for item in spatial_dbs[idx].intersection(
                        spatial_dbs[idx].bounds, objects=True)
                ]
                while feature_queue:
                    feature = feature_queue.pop()
                    sub_pixel_bbox = (
                        feature.bbox[0] - tile_matrix['resolution'],
                        feature.bbox[1] - tile_matrix['resolution'],
                        feature.bbox[2] + tile_matrix['resolution'],
                        feature.bbox[3] + tile_matrix['resolution'])
                    nearby_points = [
                        item for item in spatial_dbs[idx].intersection(
                            sub_pixel_bbox, objects=True)
                        if item.id != feature.id
                    ]
                    if nearby_points:
                        # We reduce the number of clustered points to 1/nth of their previous number. (user-selectable)
                        # All the nearby points are then dropped from the queue.
                        for point in random.sample(
                                nearby_points,
                                len(nearby_points) - int(
                                    math.floor(
                                        len(nearby_points)
                                        **(1 / float(cluster_reduce_rate))))):
                            spatial_dbs[idx].delete(point.id, point.bbox)
                        for point in nearby_points:
                            [
                                feature_queue.remove(item)
                                for item in feature_queue
                                if item.id == point.id
                            ]

        # Start making tiles. We figure out the tile's bbox, then search for all the features that intersect with that bbox,
        # then turn the resulting list into an MVT tile and write the tile.
        for y in range(tile_matrix['matrix_height']):
            for x in range(tile_matrix['matrix_width']):
                # Get tile bounds
                min_x = tile_matrix['matrix_extents'][0] + (
                    x * tile_matrix['tile_size_in_map_units'])
                max_y = tile_matrix['matrix_extents'][3] - (
                    y * tile_matrix['tile_size_in_map_units'])
                max_x = min_x + tile_matrix['tile_size_in_map_units']
                min_y = max_y - tile_matrix['tile_size_in_map_units']
                tile_bbox = shapely.geometry.box(min_x, min_y, max_x, max_y)

                # MVT tiles usually have a buffer around the edges for rendering purposes
                tile_buffer = 5 * (tile_matrix['tile_size_in_map_units'] / 256)
                tile_buffer_bbox = shapely.geometry.box(
                    min_x - tile_buffer, min_y - tile_buffer,
                    max_x + tile_buffer, max_y + tile_buffer)

                if debug:
                    print("Processing tile: {0}/{1}/{2}".format(z, x, y))
                    print('Tile Bounds: ' + str(tile_bbox.bounds))

                # Iterate through the shapefile geometry and grab anything in this tile's bounds
                tile_features = []
                for spatial_db in spatial_dbs:
                    for feature in [
                            item.object for item in spatial_db.intersection(
                                tile_buffer_bbox.bounds, objects=True)
                    ]:
                        geometry = shapely.geometry.shape(feature['geometry'])
                        # If the feature isn't fully contained in the tile bounds, we need to clip it.
                        if not shapely.geometry.shape(
                                feature['geometry']).within(tile_buffer_bbox):
                            geometry = tile_buffer_bbox.intersection(geometry)
                        new_feature = {
                            'geometry': geometry,
                            'properties': feature['properties']
                        }
                        tile_features.append(new_feature)

                # Create MVT tile from the features in this tile (Only doing single layers for now)
                new_layer = {'name': layer_name, 'features': tile_features}
                # Have to change the default rounding if in Python 2.6 due to Decimal rounding issues.
                if sys.version_info < (2, 7):
                    round_fn = py_26_round_fn
                else:
                    round_fn = None
                mvt_tile = mapbox_vector_tile.encode(
                    [new_layer],
                    quantize_bounds=tile_bbox.bounds,
                    y_coord_down=False,
                    round_fn=round_fn)

                # Write out artifact mvt files for debug mode.
                if debug and mvt_tile:
                    mvt_filename = os.path.join(
                        os.getcwd(),
                        'tiles/test_{0}_{1}_{2}.mvt'.format(z, x, y))
                    with open(mvt_filename, 'wb+') as f:
                        f.write(mvt_tile)

                # Write out MVT tile data to MRF. Note that we have to gzip the tile first.
                if mvt_tile:
                    # gzip into an in-memory bytes buffer (io.BytesIO; requires
                    # `import io` at module level)
                    out = io.BytesIO()
                    gzip_obj = gzip.GzipFile(fileobj=out, mode='w')
                    gzip_obj.write(mvt_tile)
                    gzip_obj.close()
                    zipped_tile_data = out.getvalue()
                    tile_index = struct.pack('!QQ', pvt_offset,
                                             len(zipped_tile_data))
                    pvt_offset += len(zipped_tile_data)
                    fout.write(zipped_tile_data)

                else:
                    tile_index = notile
                fidx.write(tile_index)

    fidx.close()
    fout.close()
    return
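
# Reading a tile back out of the .idx/.pvt pair, to illustrate the layout:
# each index record is two big-endian uint64s (offset, length), and each tile
# is a gzipped MVT blob. Linear tile numbering is an assumption here.
import gzip
import struct


def read_tile(idx_path, pvt_path, tile_number):
    with open(idx_path, 'rb') as fidx:
        fidx.seek(tile_number * 16)  # 16 bytes per (offset, length) record
        offset, length = struct.unpack('!QQ', fidx.read(16))
    if length == 0:
        return None  # "notile" record
    with open(pvt_path, 'rb') as fpvt:
        fpvt.seek(offset)
        return gzip.decompress(fpvt.read(length))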
Ejemplo n.º 57
0
def test_write_json_object_properties():
    """Python object properties are properly serialized"""
    data = """
{
  "type": "FeatureCollection",
  "features": [
    {
      "geometry": {
        "type": "Polygon",
        "coordinates": [
          [
            [
              87.33588,
              43.53139
            ],
            [
              87.33588,
              45.66894
            ],
            [
              90.27542,
              45.66894
            ],
            [
              90.27542,
              43.53139
            ],
            [
              87.33588,
              43.53139
            ]
          ]
        ]
      },
      "type": "Feature",
      "properties": {
        "upperLeftCoordinate": {
          "latitude": 45.66894,
          "longitude": 87.91166
        },
        "tricky": "{gotcha"
      }
    }
  ]
}
"""
    data = json.loads(data)['features'][0]
    tmpdir = tempfile.mkdtemp()
    filename = os.path.join(tmpdir, 'test.json')
    with fiona.open(filename,
                    'w',
                    driver='GeoJSON',
                    schema={
                        'geometry': 'Polygon',
                        'properties': {
                            'upperLeftCoordinate': 'str',
                            'tricky': 'str'
                        }
                    }) as dst:
        dst.write(data)

    with fiona.open(filename) as src:
        ftr = next(iter(src))
        props = ftr['properties']
        assert props['upperLeftCoordinate']['latitude'] == 45.66894
        assert props['upperLeftCoordinate']['longitude'] == 87.91166
        assert props['tricky'] == "{gotcha"
Ejemplo n.º 58
0
 def setUp(self):
     vfs = 'zip://{}'.format(self.path_coutwildrnp_zip)
     self.c = fiona.open("/coutwildrnp.shp", "r", vfs=vfs)
     self.path = os.path.join(self.data_dir, 'coutwildrnp.zip')
Ejemplo n.º 59
0
def clean_ways(orig_file, DOC_FP):
    """
    Reads in the osm_ways file, cleans up the features, and reprojects
    the results into the 3857 projection.
    Additionally writes a key file which records the correspondence between
    each highway type string and the int feature it is mapped to.
    Features:
        width
        lanes
        hwy_type
        osm_speed
        signal
    Args:
        orig_file: filename of the original ways file
        DOC_FP: directory to write the highway keys file to
    Returns:
        a list of reprojected way lines
    """

    way_lines = fiona.open(orig_file)

    highway_keys = {}
    results = []
    for way_line in way_lines:

        speed = get_speed(way_line['properties']['maxspeed']) \
            if 'maxspeed' in way_line['properties'] else 0
        width = get_width(way_line['properties']['width']) \
            if 'width' in way_line['properties'] else 0

        lanes = way_line['properties']['lanes']
        if lanes:
            lanes = max([int(x) for x in re.findall(r'\d', lanes)])
        else:
            lanes = 0

        # Need to have an int highway field
        if way_line['properties']['highway'] not in highway_keys:
            highway_keys[way_line['properties']['highway']] = len(highway_keys)

        # Width per lane
        width_per_lane = 0
        if lanes and width:
            width_per_lane = round(width / lanes)

        # Use oneway
        oneway = 0
        if way_line['properties']['oneway'] == 'True':
            oneway = 1

        way_line['properties'].update({
            'width': width,
            'lanes': int(lanes),
            'hwy_type': highway_keys[way_line['properties']['highway']],
            'osm_speed': speed,
            'signal': 0,
            'oneway': oneway,
            'width_per_lane': width_per_lane
        })
        results.append(way_line)

    write_highway_keys(DOC_FP, highway_keys)
    return results
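
# Hypothetical usage: highway type strings become small ints in 'hwy_type',
# and the string-to-int key is written out via write_highway_keys.
ways = clean_ways('osm_ways.shp', 'docs/')
print(ways[0]['properties']['hwy_type'])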
Ejemplo n.º 60
0
 def setUp(self):
     vfs = "tar://{}".format(self.path_coutwildrnp_tar)
     self.c = fiona.open("/testing/coutwildrnp.shp", "r", vfs=vfs)
     self.path = os.path.join(self.data_dir, 'coutwildrnp.tar')