def __init__(self, crs=None, prjs=None, epsg=None):
    """Build a coordinate system from a PROJ.4 dict, PROJ.4 string, or EPSG code.

    :param dict crs: PROJ.4 parameter mapping (values are coerced to plain
        Python types before use).
    :param str prjs: PROJ.4 definition string.
    :param int epsg: EPSG code.
    :raises NotImplementedError: If no CRS source is provided.
    """
    if crs is None:
        if prjs is not None:
            crs = from_string(prjs)
        elif epsg is not None:
            sr = SpatialReference()
            sr.ImportFromEPSG(epsg)
            crs = from_string(sr.ExportToProj4())
        else:
            # BUG FIX: raise with a message instead of the bare exception class.
            raise NotImplementedError('A crs dictionary, PROJ.4 string, or EPSG code is required.')
    else:
        ## remove unicode and change to python types
        for k, v in crs.iteritems():
            if type(v) == unicode:
                crs[k] = str(v)
            else:
                try:
                    # May be a numpy scalar/array needing conversion.
                    crs[k] = v.tolist()
                except AttributeError:
                    continue
    # Round-trip through OSR to normalize the PROJ.4 parameters.
    sr = SpatialReference()
    sr.ImportFromProj4(to_string(crs))
    self.value = from_string(sr.ExportToProj4())
    # BUG FIX: `assert` is stripped under `python -O`; check explicitly so the
    # empty-CRS failure is always reported.
    if self.value == {}:
        ocgis_lh(logger='crs', exc=ValueError('Empty CRS: The conversion to PROJ4 may have failed. The CRS value is: {0}'.format(crs)))
def __init__(self, value=None, proj4=None, epsg=None, name=OcgisConvention.Name.COORDSYS):
    """Build a coordinate system from a PROJ.4 value dict, PROJ.4 string, or EPSG code.

    :param dict value: PROJ.4 parameter mapping (as produced by fiona's ``from_string``).
    :param str proj4: PROJ.4 definition string.
    :param int epsg: EPSG code.
    :param str name: Name for the coordinate system variable.
    :raises ValueError: If no CRS source is given, or the PROJ.4 round trip
        produces an empty CRS.
    """
    self.name = name
    # Allows operations on data variables to look through an empty dimension list.
    # Alleviates instance checking.
    self.dimensions = tuple()
    self.dimension_names = tuple()
    self.has_bounds = False
    self._epsg = epsg

    # Some basic overloading for WGS84.
    if epsg == 4326:
        value = WGS84().value

    # Add a special check for an 'init' key in the value dictionary.
    if value is not None:
        # BUG FIX: the original inspected list(value.values())[0] — an arbitrary
        # "first" value that only coincidentally corresponded to 'init'. Look
        # the 'init' entry up explicitly.
        init = value.get('init')
        if init is not None and str(init).startswith('epsg'):
            epsg = int(str(init).split(':')[1])
            value = None

    if value is None:
        if proj4 is not None:
            value = from_string(proj4)
        elif epsg is not None:
            sr = SpatialReference()
            sr.ImportFromEPSG(epsg)
            value = from_string(sr.ExportToProj4())
        else:
            msg = 'A value dictionary, PROJ.4 string, or EPSG code is required.'
            raise ValueError(msg)
    else:
        # Remove unicode to avoid strange issues with proj and fiona.
        for k, v in value.items():
            if isinstance(v, six.string_types):
                value[k] = str(v)
            else:
                try:
                    value[k] = v.tolist()  # this may be a numpy arr that needs conversion
                except AttributeError:
                    continue

    # Round-trip through OSR to normalize the PROJ.4 parameters.
    sr = SpatialReference()
    sr.ImportFromProj4(to_string(value))
    self.value = from_string(sr.ExportToProj4())
    # BUG FIX: the previous assert-based check was stripped under `python -O`.
    if self.value == {}:
        msg = 'Empty CRS: The conversion to PROJ.4 may have failed. The CRS value is: {0}'.format(value)
        raise ValueError(msg)
def __init__(self, value=None, proj4=None, epsg=None, name=OcgisConvention.Name.COORDSYS):
    """Build a coordinate system from a PROJ.4 value dict, PROJ.4 string, or EPSG code.

    :param dict value: PROJ.4 parameter mapping (as produced by fiona's ``from_string``).
    :param str proj4: PROJ.4 definition string.
    :param int epsg: EPSG code.
    :param str name: Name for the coordinate system variable.
    :raises ValueError: If no CRS source is given, or the PROJ.4 round trip
        produces an empty CRS.
    """
    self.name = name
    # Allows operations on data variables to look through an empty dimension list.
    # Alleviates instance checking.
    self.dimensions = tuple()
    self.dimension_names = tuple()
    self.has_bounds = False
    self._epsg = epsg

    # Some basic overloading for WGS84.
    if epsg == 4326:
        value = WGS84().value

    # Add a special check for an 'init' key in the value dictionary.
    if value is not None:
        # BUG FIX: the original inspected list(value.values())[0] — an arbitrary
        # "first" value that only coincidentally corresponded to 'init'. Look
        # the 'init' entry up explicitly.
        init = value.get('init')
        if init is not None and str(init).startswith('epsg'):
            epsg = int(str(init).split(':')[1])
            value = None

    if value is None:
        if proj4 is not None:
            value = from_string(proj4)
        elif epsg is not None:
            sr = SpatialReference()
            sr.ImportFromEPSG(epsg)
            value = from_string(sr.ExportToProj4())
        else:
            msg = 'A value dictionary, PROJ.4 string, or EPSG code is required.'
            raise ValueError(msg)
    else:
        # Remove unicode to avoid strange issues with proj and fiona.
        for k, v in value.items():
            if isinstance(v, six.string_types):
                value[k] = str(v)
            else:
                try:
                    value[k] = v.tolist()  # this may be a numpy arr that needs conversion
                except AttributeError:
                    continue

    # Round-trip through OSR to normalize the PROJ.4 parameters.
    sr = SpatialReference()
    sr.ImportFromProj4(to_string(value))
    self.value = from_string(sr.ExportToProj4())
    # BUG FIX: the previous assert-based check was stripped under `python -O`.
    if self.value == {}:
        msg = 'Empty CRS: The conversion to PROJ.4 may have failed. The CRS value is: {0}'.format(value)
        raise ValueError(msg)
def __init__(self, filename):
    """Open *filename* read-only via GDAL and cache basic georeferencing info.

    Sets ``shape`` to (rows, cols). When the dataset exposes projection
    metadata, also sets ``wkt``, ``crs`` (PROJ.4 mapping), ``gcs`` (the
    geographic counterpart) and ``projected = True``; otherwise only
    ``projected = False``.
    """
    self.__dataset__ = self
    self.__gdal__ = G.Open(filename, G.GA_ReadOnly)
    # GDAL reports X=cols / Y=rows; store numpy-style (rows, cols).
    self.shape = (self.__gdal__.RasterYSize, self.__gdal__.RasterXSize)
    try:
        self.wkt = self.__gdal__.GetProjection()
        self.__osr__ = osr.SpatialReference()
        self.__osr__.ImportFromWkt(self.wkt)
        self.crs = from_string(self.__osr__.ExportToProj4())
        # Geographic (lat/lon) CS underlying the projected CRS.
        self.gcs = from_string(self.__osr__.CloneGeogCS().ExportToProj4())
        self.projected = True
    except AttributeError:
        # No usable projection metadata on the dataset.
        self.projected = False
def test_towgs84():
    """The +towgs84 datum-shift parameter must survive parsing."""
    pieces = [
        '+proj=lcc +lat_1=49 +lat_2=46 +lat_0=47.5 ',
        '+lon_0=13.33333333333333 +x_0=400000 +y_0=400000 +ellps=bessel ',
        '+towgs84=577.326,90.129,463.919,5.137,1.474,5.297,2.4232 ',
        '+units=m +wktext +no_defs',
    ]
    parsed = crs.from_string(''.join(pieces))
    assert 'towgs84' in parsed
def crs(self):
    """Lazily resolve and cache the CRS mapping.

    Prefers the stored PROJ string over the EPSG code; returns None when
    neither source is available.
    """
    if self._crs is not None:
        return self._crs
    if self._proj_str is not None:
        self._crs = from_string(self._proj_str)
    elif self.epsg is not None:
        self._crs = from_epsg(self.epsg)
    return self._crs
def write_shapefile(vl, path, name=None, df=None, driver="ESRI Shapefile"):
    """Write vector layer *vl* to *path* as a shapefile, optionally uploading.

    For s3/gs destinations the file is first written to a temp directory,
    zipped, uploaded, then cleaned up; for local paths an existing target is
    an error. Returns the final local path or the remote URI.
    NOTE(review): reconstructed from flattened source — the placement of the
    trailing-slash strip (all schemes vs. file-only) should be confirmed.
    """
    import fiona
    from fiona import crs
    layer = None
    # NOTE(review): compares against "ESRI ShapeFile" (capital F) while the
    # default driver is "ESRI Shapefile" — this branch may never trigger.
    if driver == "ESRI ShapeFile":
        layer = vl.name if vl.name is not None else "layer_1"
    uri = fileutils.parse_uri(path)
    if uri.scheme in ["s3", "gs"]:
        # Remote target: stage locally first.
        path = mkdtemp()
    elif uri.scheme == "file":
        if os.path.exists(path):
            raise IOError("Path exists:" + path)
    if path.endswith("/"):
        path = path[:-1]
    try:
        proj4_str = vl.proj.ExportToProj4()
        proj = crs.from_string(proj4_str)
        schema = get_schema(df) if df is not None else {}
        records = vl.to_dict(df)["features"]
        # Geometry type and __id__ field type are inferred from the first record.
        schema["geometry"] = records[0]["geometry"]["type"]
        __id__ = records[0]["properties"]["__id__"]
        k = schema["properties"]
        k["__id__"] = "int:64" if isinstance(__id__, int) else "str"
        with fiona.open(path, "w", driver=driver, layer=layer, crs=proj, schema=schema) as c:
            c.writerecords(records)
        zippath = None
        if driver == "ESRI Shapefile":
            zippath = create_zip(path)
        fpath = path if zippath is None else zippath
        if uri.scheme in ["s3", "gs"]:
            s3path = uri_to_string(uri)
            try:
                upload(fpath, s3path, remove_local=True)
            finally:
                if os.path.exists(fpath):
                    os.remove(fpath)
            return s3path
        return fpath
    finally:
        # Remove the staging directory (and its contents) if one was created.
        if os.path.isdir(path):
            for f in os.listdir(path):
                os.remove(os.path.join(path, f))
            os.removedirs(path)
def test_from_string_utm():
    """Padded UTM PROJ.4 input parses to typed values; bare params are dropped."""
    parsed = crs.from_string(" +proj=utm +zone=13 +ellps=WGS84 +foo ")
    assert dict(parsed.items()) == {'proj': 'utm', 'ellps': 'WGS84', 'zone': 13}
    assert 'foo' not in parsed
def crs(self):
    """Lazily resolve and cache the CRS mapping (todo: refactor this to proj_dict)."""
    if self._crs is None:
        if self._proj_str is not None:
            resolved = from_string(self._proj_str)
        elif self.epsg is not None:
            resolved = from_epsg(self.epsg)
        else:
            resolved = None
        self._crs = resolved
    return self._crs
def test_from_string_utm():
    """Padded UTM PROJ.4 input parses to typed values; bare params are dropped."""
    result = crs.from_string(" +proj=utm +zone=13 +ellps=WGS84 +foo ")
    expected = {"proj": "utm", "ellps": "WGS84", "zone": 13}
    assert dict(result.items()) == expected
    assert "foo" not in result
def export_to_file(self, tenant):
    """Export this layer's features to a zipped ESRI Shapefile for *tenant*.

    Reuses the last finished LayerFile if one exists, otherwise creates a
    'working' record. On success the record points at the zip and is marked
    'finished'; on any error it is marked 'failed' and the exception is
    re-raised. The record is always saved.

    :param tenant: Tenant/schema identifier used for the DB connection and path.
    :return: The saved LayerFile instance (on success).
    """
    connection.close()
    connection.set_schema(tenant)
    layer_file = LayerFile.objects.filter(
        layer=self, status='finished').order_by('modified').last()
    if layer_file is None:
        layer_file = LayerFile(layer=self, status='working')
        layer_file.save()
    try:
        filename = slugify(self.name + str(datetime.now()))
        path = "%s/downloads/shapefile/%s/%s" % (settings.MEDIA_ROOT, tenant, filename)
        crs = from_string(self.feature_set.first().geometry.crs.proj4)
        self.refresh_from_db()
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        with fiona.open(path + '.shp', 'w', driver='ESRI Shapefile',
                        schema=self.schema, crs=crs) as out:
            for feature in self.feature_set.all():
                properties = feature.properties
                # 'fid' is internal bookkeeping; drop it if present.
                # (Replaces a bare `except: pass` around `del`.)
                properties.pop('fid', None)
                if self.schema['geometry'] == 'GeometryCollection':
                    geometry = json.loads(feature.geometry.json)
                else:
                    geometry = json.loads(feature.geometry.json)['geometries'][0]
                out.write({
                    'geometry': geometry,
                    'properties': feature.properties,
                })
        os.chdir(os.path.dirname(path))
        with ZipFile(path + '.zip', 'w') as shape_zip:
            for f in glob('%s.*' % filename):
                if not os.path.basename(f).endswith('zip'):
                    shape_zip.write(f)
        layer_file.file = "downloads/shapefile/%s/%s.zip" % (tenant, filename)
        layer_file.status = 'finished'
    except Exception:
        # BUG FIX: the original executed `raise` BEFORE assigning
        # status = 'failed', leaving failures recorded as 'working' forever.
        layer_file.status = 'failed'
        raise
    finally:
        layer_file.save()
    return layer_file
def test_from_string_utm():
    """Padded UTM PROJ.4 input parses to typed values; bare params are dropped."""
    raw = " +proj=utm +zone=13 +ellps=WGS84 +foo "
    val = crs.from_string(raw)
    assert len(val.items()) == 3
    assert (val['proj'], val['ellps'], val['zone']) == ('utm', 'WGS84', 13)
    assert 'foo' not in val
def test_from_string():
    """Padded longlat PROJ.4 input parses to exactly four known parameters."""
    parsed = crs.from_string(" +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +foo ")
    expected = {"proj": "longlat", "ellps": "WGS84", "datum": "WGS84", "no_defs": True}
    assert dict(parsed.items()) == expected
    assert "foo" not in parsed
def write_shapefile(vl, path, name=None, df=None, driver="ESRI Shapefile"):
    """Write vector layer *vl* to *path* as a shapefile, optionally uploading to s3.

    For an s3 destination the file is first written to a temp directory,
    zipped, uploaded, then cleaned up; for local paths an existing target is
    an error. Returns the final local path or the s3 URI.
    NOTE(review): reconstructed from flattened source — the placement of the
    trailing-slash strip (all schemes vs. file-only) should be confirmed.
    """
    import fiona
    from fiona import crs
    layer = None
    # NOTE(review): compares against "ESRI ShapeFile" (capital F) while the
    # default driver is "ESRI Shapefile" — this branch may never trigger.
    if driver == "ESRI ShapeFile":
        layer = vl.name if vl.name is not None else "layer_1"
    uri = fileutils.parse_uri(path)
    if uri.scheme == "s3":
        # Remote target: stage locally first.
        path = mkdtemp()
    elif uri.scheme == "file":
        if os.path.exists(path):
            raise IOError("Path exists:" + path)
    if path.endswith("/"):
        path = path[:-1]
    try:
        proj4_str = vl.proj.ExportToProj4()
        proj = crs.from_string(proj4_str)
        schema = get_schema(df) if df is not None else {}
        records = vl.to_dict(df)["features"]
        # Geometry type and __id__ field type are inferred from the first record.
        schema["geometry"] = records[0]["geometry"]["type"]
        __id__ = records[0]["properties"]["__id__"]
        k = schema["properties"]
        k["__id__"] = "int:64" if isinstance(__id__, int) else "str"
        with fiona.open(path, "w", driver=driver, layer=layer, crs=proj, schema=schema) as c:
            c.writerecords(records)
        zippath = None
        if driver == "ESRI Shapefile":
            zippath = create_zip(path)
        fpath = path if zippath is None else zippath
        if uri.scheme == "s3":
            s3path = uri_to_string(uri)
            try:
                upload(fpath, s3path, remove_local=True)
            finally:
                if os.path.exists(fpath):
                    os.remove(fpath)
            return s3path
        return fpath
    finally:
        # Remove the staging directory (and its contents) if one was created.
        if os.path.isdir(path):
            for f in os.listdir(path):
                os.remove(os.path.join(path, f))
            os.removedirs(path)
def set_projection(self):
    """Resolve ``self.crs`` from a .prj file, PROJ4 string, or EPSG code, in that order."""
    from fiona.crs import to_string, from_epsg, from_string
    if self.prj is not None:
        # Populates self.proj4 from the .prj file.
        self.get_proj4()
    if self.proj4 is not None:
        projection = from_string(self.proj4)
    elif self.epsg is not None:
        projection = from_epsg(self.epsg)
    else:
        # No projection information available; leave self.crs untouched.
        return
    self.crs = projection
def test_from_string():
    """Padded longlat PROJ.4 input parses to exactly four known parameters."""
    raw = " +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +foo "
    val = crs.from_string(raw)
    assert len(val.items()) == 4
    assert (val['proj'], val['ellps'], val['datum']) == ('longlat', 'WGS84', 'WGS84')
    assert val['no_defs'] == True
    assert 'foo' not in val
def test_from_string():
    """Padded longlat PROJ.4 input parses to exactly four known parameters."""
    parsed = crs.from_string(
        " +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +foo "
    )
    assert dict(parsed.items()) == {
        'proj': 'longlat',
        'ellps': 'WGS84',
        'datum': 'WGS84',
        'no_defs': True,
    }
    assert 'foo' not in parsed
def to_geopandas(raster):
    """
    Convert GeoRaster to GeoPandas DataFrame, which can be easily exported to
    other types of files and used to do other types of operations.

    The DataFrame has the geometry (Polygon), row, col, value, x, and y values
    for each cell.

    Usage:
        df = gr.to_geopandas(raster)
    """
    frame = to_pandas(raster)
    frame['geometry'] = frame.apply(squares, georaster=raster, axis=1)
    projection = from_string(raster.projection.ExportToProj4())
    return gp.GeoDataFrame(frame, crs=projection)
def __init__(self, value=None, proj4=None, epsg=None, name=constants.DEFAULT_COORDINATE_SYSTEM_NAME):
    """Build a coordinate system from a PROJ.4 value dict, PROJ.4 string, or EPSG code.

    :param dict value: PROJ.4 parameter mapping (as produced by fiona's ``from_string``).
    :param str proj4: PROJ.4 definition string.
    :param int epsg: EPSG code.
    :param str name: Name for the coordinate system variable.
    :raises ValueError: If no CRS source is given, or the PROJ.4 round trip
        produces an empty CRS.
    """
    self.name = name

    # Add a special check for an 'init' key in the value dictionary.
    if value is not None:
        # BUG FIX: the original inspected value.values()[0] — an arbitrary
        # "first" value that only coincidentally corresponded to 'init'. Look
        # the 'init' entry up explicitly.
        if 'init' in value and value['init'].startswith('epsg'):
            epsg = int(value['init'].split(':')[1])
            value = None

    if value is None:
        if proj4 is not None:
            value = from_string(proj4)
        elif epsg is not None:
            sr = SpatialReference()
            sr.ImportFromEPSG(epsg)
            value = from_string(sr.ExportToProj4())
        else:
            msg = 'A value dictionary, PROJ.4 string, or EPSG code is required.'
            raise ValueError(msg)
    else:
        # Remove unicode to avoid strange issues with proj and fiona.
        for k, v in value.iteritems():
            if type(v) == unicode:
                value[k] = str(v)
            else:
                try:
                    value[k] = v.tolist()  # this may be a numpy arr that needs conversion
                except AttributeError:
                    continue

    # Round-trip through OSR to normalize the PROJ.4 parameters.
    sr = SpatialReference()
    sr.ImportFromProj4(to_string(value))
    self.value = from_string(sr.ExportToProj4())
    # BUG FIX: the previous assert-based check was stripped under `python -O`.
    if self.value == {}:
        msg = 'Empty CRS: The conversion to PROJ.4 may have failed. The CRS value is: {0}'.format(value)
        raise ValueError(msg)
def set_projection(self):
    """Resolve ``self.crs`` from a .prj file, PROJ4 string, or EPSG code.

    If the fiona GIS dependency is missing, prints an installation hint and
    leaves ``self.crs`` untouched.
    """
    try:
        from fiona.crs import to_string, from_epsg, from_string
    except ImportError:
        # BUG FIX: the original used a bare `except:` (masking unrelated
        # errors) and then fell through into a NameError on from_string.
        # Report the missing dependency and bail out cleanly instead.
        print('\nGIS dependencies not installed. Please see readme for instructions on installation')
        return
    if self.prj is not None:
        # Populates self.proj4 from the .prj file.
        self.get_proj4()
    if self.proj4 is not None:
        self.crs = from_string(self.proj4)
    elif self.epsg is not None:
        self.crs = from_epsg(self.epsg)
def buildGeopackage(self):
    """Build a GeoPackage of review errors and return it as an in-memory stream.

    :return: Tuple of (BytesIO with the .gpkg contents, suggested filename).
    """
    tmpdir = tempfile.mkdtemp()
    filename = 'Review_' + str(self.review) + '_Errors.gpkg'
    # BUG FIX: replaced the sys.platform separator branching with
    # os.path.join, which handles the platform separator itself.
    tempFile = os.path.join(tmpdir, filename)
    schema = {
        'geometry': 'Point',
        'properties': [('deliv_type', 'str'),
                       ('deliv_id', 'int'),
                       ('type', 'str'),
                       ('subtype', 'str'),
                       ('desc', 'str'),
                       ('resolved', 'str')]
    }
    crs = from_string('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
    with fiona.open(tempFile, 'w', layer='points', driver='GPKG', schema=schema, crs=crs) as dst:
        for error in self.errors:
            geometry = {
                'type': 'Point',
                'coordinates': [error['lon'], error['lat']]
            }
            feature = {
                'geometry': geometry,
                'properties': {
                    'deliv_type': error['delivType'],
                    'deliv_id': error['delivId'],
                    'type': error['type'],
                    'subtype': error['subtype'],
                    'desc': error['desc'],
                    'resolved': error['resolved']
                }
            }
            dst.write(feature)
    # BUG FIX: the original re-opened `filename` relative to the current
    # working directory, not the temp file that was actually written.
    with open(tempFile, 'rb') as fd:
        contents = fd.read()
    s = BytesIO(contents)
    return s, filename
def create_jsonfile(jsonfilename):
    """Dump the first test shapefile as a GeoJSON-style FeatureCollection.

    The output embeds a named CRS member derived from the source collection's
    PROJ.4 parameters.
    """
    import json
    import fiona
    from fiona.crs import from_string
    from fiona.tool import crs_uri
    with fiona.collection(os.path.join(DATADIR, FILES[0]), 'r') as source:
        features = [feat for feat in source]
        # Re-serialize the source CRS mapping into a PROJ.4 string.
        crs = ' '.join('+%s=%s' % (k, v) for k, v in source.crs.items())
    my_layer = {'type': 'FeatureCollection',
                'features': features,
                'crs': {'type': 'name',
                        'properties': {'name': crs_uri(from_string(crs))}}}
    with open(jsonfilename, 'w') as f:
        f.write(json.dumps(my_layer))
def write_shapefile(data, proj):
    """Write (tileid, coords) pairs to OUTPATH as polygons with a bbox attribute."""
    layer_schema = {
        'geometry': 'Polygon',
        'properties': {'tileid': 'str', 'bounds': 'str'},
    }
    with fiona.open(OUTPATH, 'w', 'ESRI Shapefile', layer_schema, crs=from_string(proj)) as sink:
        for tile_id, vertices in data:
            pts = np.array(vertices)
            lo = pts.min(axis=0)
            hi = pts.max(axis=0)
            # Bounding box is stored as the string repr of (xmin, ymin, xmax, ymax).
            bbox_text = str((lo[0], lo[1], hi[0], hi[1]))
            sink.write({
                'geometry': mapping(Polygon(vertices)),
                'properties': {'tileid': tile_id, 'bounds': bbox_text},
            })
def write_to_file(fc, filename, layer, driver, crs):
    """Write a feature collection of classified polygons to a vector file.

    :param fc: Iterable of GeoJSON-like feature dicts.
    :param filename: Output file path.
    :param layer: Layer name for multi-layer formats.
    :param driver: OGR driver name (e.g. 'ESRI Shapefile', 'GPKG').
    :param crs: PROJ.4 string for the output coordinate reference system.
    """
    # Define output file schema
    schema = {
        'geometry': 'Polygon',
        'properties': {
            'class': 'str',
            'code': 'int'
        }
    }
    # Write to file
    logger.info('Writing feature collection to file')
    crs = from_string(crs)
    with fiona.open(filename, 'w', encoding='utf-8', schema=schema, driver=driver,
                    layer=layer, crs=crs) as dst:
        # IDIOM FIX: the original built a throwaway list comprehension purely
        # for its side effects; writerecords streams the features instead.
        dst.writerecords(fc)
def guess_crs(thing):
    """Best-effort conversion of *thing* into a fiona CRS mapping.

    Tried in order: an existing CRS dict (returned as-is), an object exposing
    a ``.crs`` attribute, an EPSG code (anything int()-able), and finally a
    PROJ.4 string.
    """
    # Case 1: already a CRS-shaped mapping.
    try:
        if thing.keys() and set(thing.keys()).issubset(set(crs.all_proj_keys)):
            return thing
    except AttributeError:
        pass
    # Case 2: a collection or any object carrying a crs attribute.
    try:
        return thing.crs
    except AttributeError:
        pass
    # Case 3: an EPSG code.
    try:
        return crs.from_epsg(int(thing))
    except ValueError:
        pass
    # Case 4: fall back to parsing as a PROJ.4 string.
    return crs.from_string(thing)
config = configparser.ConfigParser() config.read("config") db_user = config['DB']['db_user'] db_pass = config['DB']['db_pass'] db_host = config['DB']['db_host'] db_name = config['DB']['db_name'] print("DB Configuration:") print(f"DB User: {db_user}") print(f"DB Password: {db_pass}") print(f"DB Host: {db_host}") print(f"DB Name: {db_name}") # WGS84 Projection crs = from_string('+proj=longlat +ellps=WGS84 +datum=WGS84 +n_defs') # PG Stuff pg_connection = psycopg2.connect(database=db_name, user=db_user, password=db_pass, host=db_host) cursor = pg_connection.cursor() # Create the schema schema = { 'geometry': 'Point', 'properties': [('spot_id', 'int'), ('timestamp', 'datetime'), ('reporter', 'str'), ('reporter_grid', 'str'), ('snr', 'int'), ('frequency', 'int'), ('call_sign', 'str'),
def df2shp(dataframe, shpname, geo_column='geometry', index=False,
           retain_order=False, prj=None, epsg=None, proj_str=None, crs=None):
    """Write a DataFrame with a column of shapely geometries to a shapefile.

    Parameters
    ----------
    dataframe : pandas.DataFrame
    shpname : str, filepath
        Output shapefile
    geo_column : str
        Name of column in dataframe with feature geometries (default 'geometry')
    index : bool
        If True, include the DataFrame index in the written shapefile
    retain_order : bool
        Retain column order in dataframe, using an OrderedDict. Shapefile will
        take about twice as long to write, since OrderedDict output is not
        supported by the pandas DataFrame object.
    prj : str
        Path to ESRI projection file describing the coordinate reference system
        of the feature geometries in the 'geometry' column.
        (specify one of prj, epsg, proj_str)
    epsg : int
        EPSG code describing the coordinate reference system of the feature
        geometries in the 'geometry' column. (deprecated; use crs)
    proj_str : str
        PROJ string describing the coordinate reference system of the feature
        geometries in the 'geometry' column. (deprecated; use crs)
    crs : obj
        A Python int, dict, str, or pyproj.crs.CRS instance passed to
        pyproj.crs.from_user_input. See
        http://pyproj4.github.io/pyproj/stable/api/crs/crs.html#pyproj.crs.CRS.from_user_input.
        Can be any of: a PROJ string, a dict of PROJ parameters, PROJ keyword
        arguments, a JSON string with PROJ parameters, a CRS WKT string, an
        authority string (e.g. 'epsg:4326'), an EPSG integer code, an
        ("auth_name", "auth_code") tuple, an object with a `to_wkt` method,
        or a :class:`pyproj.crs.CRS` instance.

    Returns
    -------
    writes a shapefile to shpname
    """
    # first check if output path exists
    output_folder = os.path.abspath(os.path.split(shpname)[0])
    if os.path.split(shpname)[0] != '' and not os.path.isdir(output_folder):
        raise IOError("Output folder doesn't exist:\n{}".format(output_folder))
    # check for empty dataframe
    if len(dataframe) == 0:
        raise IndexError("DataFrame is empty!")
    df = dataframe.copy()  # make a copy so the supplied dataframe isn't edited
    # reassign geometry column if geo_column is special (e.g. something other than "geometry")
    if geo_column != 'geometry':
        df['geometry'] = df[geo_column]
        df.drop(geo_column, axis=1, inplace=True)
    # assign none for geometry, to write a dbf file from dataframe
    Type = None
    if 'geometry' not in df.columns:
        df['geometry'] = None
        Type = 'None'
        mapped = [None] * len(df)
    # reset the index to integer index to enforce ordering
    # retain index as attribute field if index=True
    df.reset_index(inplace=True, drop=not index)
    # enforce 10 character limit (dbf field-name restriction)
    df.columns = rename_fields_to_10_characters(df.columns)
    properties = shp_properties(df)
    del properties['geometry']
    # set projection (or use a prj file, which must be copied after shp is written)
    # alternatively, provide a crs in dictionary form as read using fiona
    # from a shapefile like fiona.open(inshpfile).crs
    crs_wkt = None
    if epsg is not None:
        warnings.warn(
            'gisutils.df2shp: the epsg argument is deprecated; use crs instead',
            DeprecationWarning)
        from fiona.crs import from_epsg
        crs = from_epsg(int(epsg))
    elif proj_str is not None:
        warnings.warn(
            'gisutils.df2shp: the proj_str argument is deprecated; use crs instead',
            DeprecationWarning)
        from fiona.crs import from_string
        crs = from_string(proj_str)
    elif crs is not None:
        proj_crs = get_authority_crs(crs)
        # https://pyproj4.github.io/pyproj/stable/crs_compatibility.html#converting-from-pyproj-crs-crs-for-fiona
        if LooseVersion(fiona.__gdal_version__) < LooseVersion("3.0.0"):
            crs_wkt = proj_crs.to_wkt(WktVersion.WKT1_GDAL)
        else:
            # GDAL 3+ can use WKT2
            crs_wkt = proj_crs.to_wkt()
        crs = None
    else:
        pass
    # Derive the geometry type from the first mappable geometry.
    if Type != 'None':
        for g in df.geometry:
            try:
                Type = g.type
            except:
                continue
        mapped = [mapping(g) for g in df.geometry]
    schema = {'geometry': Type, 'properties': properties}
    length = len(df)
    if not retain_order:
        props = df.drop('geometry', axis=1).astype(object).to_dict(orient='records')
    else:
        props = [
            collections.OrderedDict(r)
            for i, r in df.drop('geometry', axis=1).astype(object).iterrows()
        ]
    print('writing {}...'.format(shpname), end='')
    with fiona.open(shpname, "w", driver="ESRI Shapefile", crs=crs, crs_wkt=crs_wkt, schema=schema) as output:
        for i in range(length):
            output.write({'properties': props[i], 'geometry': mapped[i]})
    # If a .prj file was supplied, copy it alongside the written shapefile.
    if prj is not None:
        try:
            print('copying {} --> {}...'.format(prj, "{}.prj".format(shpname[:-4])))
            shutil.copyfile(prj, "{}.prj".format(shpname[:-4]))
        except IOError:
            print(
                'Warning: could not find specified prj file. shp will not be projected.'
            )
    print(' Done')
def test_wktext():
    """The +wktext flag must survive a from_string round trip."""
    merc_def = ('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 '
                '+x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext '
                '+no_defs')
    parsed = crs.from_string(merc_def)
    assert 'wktext' in parsed
print(small_rect)
print(big_rect)

# Shared schema: polygon geometry with a single integer id attribute.
schema = {
    "geometry": "Polygon",
    "properties": {"id": "int"},
}

# Write a small rectangle Shapefile (Albers equal-area over North America).
with fiona.open(
    "small_rect.shp",
    "w",
    "ESRI Shapefile",
    schema,
    crs=from_string(
        "+proj=aea +lat_0=23 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs"
    ),
) as c:
    c.write(
        {
            "geometry": mapping(small_rect),
            "properties": {"id": 1},
        }
    )

# Write a big rectangle Shapefile
with fiona.open(
    "big_rect.shp",
    "w",
    "ESRI Shapefile",
    schema,
def df2shp(dataframe, shpname, geo_column='geometry', index=False, retain_order=False,
           prj=None, epsg=None, proj4=None, crs=None):
    '''
    Write a DataFrame to a shapefile

    dataframe: dataframe to write to shapefile
    geo_column: optional column containing geometry to write - default is 'geometry'
    index: If true, write out the dataframe index as a column
    retain_order : boolean
        Retain column order in dataframe, using an OrderedDict. Shapefile will
        take about twice as long to write, since OrderedDict output is not
        supported by the pandas DataFrame object.

    --->there are four ways to specify the projection....choose one
    prj: <file>.prj filename (string)
    epsg: EPSG identifier (integer)
    proj4: pyproj style projection string definition
    crs: crs attribute (dictionary) as read by fiona
    '''
    # first check if output path exists
    if os.path.split(shpname)[0] != '' and not os.path.isdir(os.path.split(shpname)[0]):
        raise IOError("Output folder doesn't exist")
    # check for empty dataframe
    if len(dataframe) == 0:
        raise IndexError("DataFrame is empty!")
    df = dataframe.copy()  # make a copy so the supplied dataframe isn't edited
    # reassign geometry column if geo_column is special (e.g. something other than "geometry")
    if geo_column != 'geometry':
        df['geometry'] = df[geo_column]
        df.drop(geo_column, axis=1, inplace=True)
    # assign none for geometry, to write a dbf file from dataframe
    Type = None
    if 'geometry' not in df.columns:
        df['geometry'] = None
        Type = 'None'
        mapped = [None] * len(df)
    # reset the index to integer index to enforce ordering
    # retain index as attribute field if index=True
    df.reset_index(inplace=True, drop=not index)
    # enforce character limit for names! (otherwise fiona marks it zero)
    # somewhat kludgey, but should work for duplicates up to 99
    df.columns = list(map(str, df.columns))  # convert columns to strings in case some are ints
    overtheline = [(i, '{}{}'.format(c[:8], i)) for i, c in enumerate(df.columns) if len(c) > 10]
    newcolumns = list(df.columns)
    for i, c in overtheline:
        newcolumns[i] = c
    df.columns = newcolumns
    properties = shp_properties(df)
    del properties['geometry']
    # set projection (or use a prj file, which must be copied after shp is written)
    # alternatively, provide a crs in dictionary form as read using fiona
    # from a shapefile like fiona.open(inshpfile).crs
    if epsg is not None:
        from fiona.crs import from_epsg
        crs = from_epsg(int(epsg))
    elif proj4 is not None:
        from fiona.crs import from_string
        crs = from_string(proj4)
    elif crs is not None:
        pass
    else:
        pass
    # Derive the geometry type from the first mappable geometry.
    if Type != 'None':
        for g in df.geometry:
            try:
                Type = g.type
            except:
                continue
        mapped = [mapping(g) for g in df.geometry]
    schema = {'geometry': Type, 'properties': properties}
    length = len(df)
    if not retain_order:
        props = df.drop('geometry', axis=1).astype(object).to_dict(orient='records')
    else:
        props = [OrderedDict(r) for i, r in df.drop('geometry', axis=1).astype(object).iterrows()]
    print('writing {}...'.format(shpname))
    with fiona.collection(shpname, "w", driver="ESRI Shapefile", crs=crs, schema=schema) as output:
        for i in range(length):
            output.write({'properties': props[i], 'geometry': mapped[i]})
    # If a .prj file was supplied, copy it alongside the written shapefile.
    if prj is not None:
        """
        if 'epsg' in prj.lower():
            epsg = int(prj.split(':')[1])
            prjstr = getPRJwkt(epsg).replace('\n', '')  # get rid of any EOL
            ofp = open("{}.prj".format(shpname[:-4]), 'w')
            ofp.write(prjstr)
            ofp.close()
        """
        try:
            print('copying {} --> {}...'.format(prj, "{}.prj".format(shpname[:-4])))
            shutil.copyfile(prj, "{}.prj".format(shpname[:-4]))
        except IOError:
            print('Warning: could not find specified prj file. shp will not be projected.')
def _set_crs(self): if self.crs is None and self.proj4 is not None: self.crs = from_string(self.proj4)
from fiona.crs import from_epsg, from_string
import matplotlib.pyplot as plt
from geopandas import GeoSeries
from shapely.geometry import Point
from shapely.geometry import LineString
import numpy as np

################## shapefile ################
# Natural Earth 1:110m country boundaries.
shp = r'C:\Users\shaoqi_i\Desktop\osgeopy-data\global\ne_110m_admin_0_countries.shp'
shp_df = geopandas.GeoDataFrame.from_file(shp)

################## set Bonne projection ################
ESRI_Bonne = """+proj=bonne +lon_0=0 +lat_1=35"""

################## transform the shp projection into Bonne ################
shp_df_Bonne = shp_df.to_crs(from_string(ESRI_Bonne))
base = shp_df_Bonne.plot(cmap='Set1', k=6, alpha=0.8, edgecolor='black')

################## plot latitude and longitude ################
################## set latlon projection ################
p1 = Proj(init='epsg:4326')
p2 = Proj("+proj=bonne +lon_0=0 +lat_1=35")

################## latitude -90~90,step=30 ################
# Build graticule lines: one per 30-degree parallel, sampled every degree of longitude.
for i in range(-90, 91, 30):
    lat_Bonne = []
    lat = zip(list(np.arange(-180, 181)), [i] * 361)
    for j in lat:
        ########## coordinates transform ###########
        x, y = transform(p1, p2, j[0], j[1])
def __init__(self, fname):
    """Store the target filename and a WGS84 lon/lat CRS; requires fiona."""
    if not fiona:
        raise ImportError('fiona')
    self.fname = fname
    wgs84 = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
    self.crs = crs.from_string(wgs84)
def subset_shape(
    ds: Union[xarray.DataArray, xarray.Dataset],
    shape: Union[str, Path, gpd.GeoDataFrame],
    raster_crs: Optional[Union[str, int]] = None,
    shape_crs: Optional[Union[str, int]] = None,
    buffer: Optional[Union[int, float]] = None,
    wrap_lons: Optional[bool] = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> Union[xarray.DataArray, xarray.Dataset]:
    """Subset a DataArray or Dataset spatially (and temporally) using a vector shape and date selection.

    Return a subsetted data array for grid points falling within the area of a
    Polygon and/or MultiPolygon shape, or grid points along the path of a
    LineString and/or MultiLineString.

    Parameters
    ----------
    ds : Union[xarray.DataArray, xarray.Dataset]
        Input values.
    shape : Union[str, Path, gpd.GeoDataFrame]
        Path to shape file, or directly a geodataframe. Supports formats
        compatible with geopandas.
    raster_crs : Optional[Union[str, int]]
        EPSG number or PROJ4 string.
    shape_crs : Optional[Union[str, int]]
        EPSG number or PROJ4 string.
    buffer : Optional[Union[int, float]]
        Buffer the shape in order to select a larger region stemming from it.
        Units are based on the shape degrees/metres.
    wrap_lons : Optional[bool]
        Manually set whether vector longitudes should extend from 0 to 360 degrees.
    start_date : Optional[str]
        Start date of the subset. Date string format -- can be year ("%Y"),
        year-month ("%Y-%m") or year-month-day ("%Y-%m-%d"). Defaults to first
        day of input data-array.
    end_date : Optional[str]
        End date of the subset. Date string format -- can be year ("%Y"),
        year-month ("%Y-%m") or year-month-day ("%Y-%m-%d"). Defaults to last
        day of input data-array.

    Returns
    -------
    Union[xarray.DataArray, xarray.Dataset]
        A subsetted copy of `ds`

    Examples
    --------
    >>> from xclim import subset
    >>> import xarray as xr
    >>> pr = xarray.open_dataset('pr.day.nc').pr

    Subset data array by shape and multiple years

    >>> prSub = subset.subset_shape(pr, shape="/path/to/polygon.shp", start_yr='1990', end_yr='1999')

    Subset data array by shape and single year

    >>> prSub = subset.subset_shape(pr, shape="/path/to/polygon.shp", start_yr='1990', end_yr='1990')

    Subset multiple variables in a single dataset

    >>> ds = xarray.open_mfdataset(['pr.day.nc', 'tas.day.nc'])
    >>> dsSub = subset.subset_shape(ds, shape="/path/to/polygon.shp", start_yr='1990', end_yr='1999')

    Subset with year-month precision - Example subset 1990-03-01 to 1999-08-31 inclusively

    >>> prSub = subset.subset_shape(ds.pr, shape="/path/to/polygon.shp", start_date='1990-03', end_date='1999-08')

    Subset with specific start_dates and end_dates

    >>> prSub = \
            subset.subset_shape(ds.pr, shape="/path/to/polygon.shp", start_date='1990-03-13', end_date='1990-08-17')
    """
    # TODO : edge case using polygon splitting decorator touches original ds when subsetting?
    # Work on a deep copy so the caller's object is never mutated.
    if isinstance(ds, xarray.DataArray):
        ds_copy = copy.deepcopy(ds._to_temp_dataset())
    else:
        ds_copy = copy.deepcopy(ds)
    if isinstance(shape, gpd.GeoDataFrame):
        poly = shape.copy()
    else:
        poly = gpd.GeoDataFrame.from_file(shape)
    if buffer is not None:
        poly.geometry = poly.buffer(buffer)
    # Get the shape's bounding box
    bounds = poly.bounds
    lon_bnds = (float(bounds.minx.values), float(bounds.maxx.values))
    lat_bnds = (float(bounds.miny.values), float(bounds.maxy.values))
    # If polygon doesn't cross prime meridian, subset bbox first to reduce processing time
    # Only case not implemented is when lon_bnds cross the 0 deg meridian but dataset grid has all positive lons
    try:
        ds_copy = subset_bbox(ds_copy, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
    except NotImplementedError:
        pass
    if ds_copy.lon.size == 0 or ds_copy.lat.size == 0:
        raise ValueError(
            "No gridcell centroids found within provided polygon bounding box. "
            'Try using the "buffer" option to create an expanded area')
    if start_date or end_date:
        ds_copy = subset_time(ds_copy, start_date=start_date, end_date=end_date)
    # Determine whether CRS types are the same between shape and raster
    if shape_crs is not None:
        # Accept either an EPSG code or a PROJ string.
        try:
            shape_crs = fiocrs.from_epsg(shape_crs)
        except ValueError:
            try:
                shape_crs = fiocrs.from_string(shape_crs)
            except ValueError:
                raise
    else:
        shape_crs = poly.crs
    if raster_crs is not None:
        # Accept either an EPSG code or a PROJ string.
        try:
            raster_crs = fiocrs.from_epsg(raster_crs)
        except ValueError:
            try:
                raster_crs = fiocrs.from_string(raster_crs)
            except ValueError:
                raise
    else:
        if np.min(ds_copy.lon) >= 0 and np.max(ds_copy.lon) <= 360:
            # PROJ4 definition for WGS84 with Prime Meridian at -180 deg lon.
            raster_crs = fiocrs.from_string(
                "+proj=longlat +ellps=WGS84 +lon_wrap=180 +datum=WGS84 +no_defs"
            )
            wrap_lons = True
        else:
            raster_crs = fiocrs.from_epsg(4326)
            wrap_lons = False
    if (shape_crs != raster_crs) or (fiocrs.from_epsg(4326) not in [shape_crs, raster_crs]):
        warnings.warn(
            "CRS definitions are not similar or both not using WGS84. Caveat emptor.",
            UserWarning,
            stacklevel=3,
        )
    mask_2d = create_mask(x_dim=ds_copy.lon, y_dim=ds_copy.lat, poly=poly, wrap_lons=wrap_lons)
    if np.all(mask_2d.isnull()):
        raise ValueError(
            "No gridcell centroids found within provided polygon. "
            'Try using the "buffer" option to create an expanded areas or verify polygon '
        )
    # loop through variables
    for v in ds_copy.data_vars:
        if set.issubset(set(mask_2d.dims), set(ds_copy[v].dims)):
            ds_copy[v] = ds_copy[v].where(mask_2d.notnull())
    # Remove coordinates where all values are outside of region mask
    for dim in mask_2d.dims:
        mask_2d = mask_2d.dropna(dim, how="all")
    ds_copy = ds_copy.sel({dim: mask_2d[dim] for dim in mask_2d.dims})
    # Add a CRS definition as a coordinate for reference purposes
    if wrap_lons:
        ds_copy.coords["crs"] = 0
        ds_copy.coords["crs"].attrs = dict(
            spatial_ref="+proj=longlat +ellps=WGS84 +lon_wrap=180 +datum=WGS84 +no_defs")
    if isinstance(ds, xarray.DataArray):
        return ds._from_temp_dataset(ds_copy)
    return ds_copy
# Notebook-style setup cell for an apartment-search geodata project:
# clustering imports, working directory, and Korean CRS definitions.
# NOTE(review): `from_string` and `CRS` are not imported in this cell —
# presumably `fiona.crs.from_string` / `fiona.crs.CRS` from an earlier
# cell; confirm against the full notebook.
import json
from sklearn.cluster import DBSCAN
from sklearn import datasets
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings(action='ignore')
import folium
import os
os.getcwd()
# Project working directory (Korean path: "real-estate big-data study /
# apartment-finding project, revised").
os.chdir('D:/부동산 빅데이터 분석 스터디/아파트 찾기 프로젝트 수정')
# Korean Transverse-Mercator CRS variants, defined via PROJ.4 strings.
# Bessel-ellipsoid definitions carry an explicit towgs84 datum shift.
epsg5178 = from_string('+proj=tmerc +lat_0=38 +lon_0=127.5 +k=0.9996 +x_0=1000000 +y_0=2000000 +ellps=bessel +units=m +no_defs +towgs84=-115.80,474.99,674.11,1.16,-2.31,-1.63,6.43')
# from_string is the helper used to define a CRS from a PROJ.4 string.
epsg4326 = from_string("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
epsg5174 = from_string("+proj=tmerc +lat_0=38 +lon_0=127.0028902777778 +k=1 +x_0=200000 +y_0=500000 +ellps=bessel +units=m +no_defs +towgs84=-115.80,474.99,674.11,1.16,-2.31,-1.63,6.43")
#epsg5179 = from_string("+proj=tmerc +lat_0=38 +lon_0=127.5 +k=0.9996 +x_0=1000000 +y_0=2000000 +ellps=GRS80 +units=m +no_defs")
#epsg5181 = from_string("+proj=tmerc +lat_0=38 +lon_0=127 +k=1 +x_0=200000 +y_0=500000 +ellps=GRS80 +units=m +no_defs")
# QGIS-style variant: includes an explicit (all-zero) towgs84 correction.
epsg5181_qgis = from_string("+proj=tmerc +lat_0=38 +lon_0=127 +k=1 +x_0=200000 +y_0=500000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
#epsg5186 = from_string("+proj=tmerc +lat_0=38 +lon_0=127 +k=1 +x_0=200000 +y_0=600000 +ellps=GRS80 +units=m +no_defs")
epsg2097 = from_string("+proj=tmerc +lat_0=38 +lon_0=127 +k=1 +x_0=200000 +y_0=500000 +ellps=bessel +units=m +no_defs +towgs84=-115.80,474.99,674.11,1.16,-2.31,-1.63,6.43")
cc = CRS({'init': 'epsg:4326', 'no_defs': True})
#%%
# Load the shapefile containing subway-station names and coordinates.
def create_mask(
    *,
    x_dim: xarray.DataArray = None,
    y_dim: xarray.DataArray = None,
    poly: gpd.GeoDataFrame = None,
    wrap_lons: bool = False,
):
    """Create a mask with values corresponding to the features in a GeoDataFrame.

    The returned mask's points have the value of the first geometry of `poly`
    they fall in.

    Parameters
    ----------
    x_dim : xarray.DataArray
      X or longitudinal dimension of xarray object.
    y_dim : xarray.DataArray
      Y or latitudinal dimension of xarray object.
    poly : gpd.GeoDataFrame
      GeoDataFrame used to create the xarray.DataArray mask.
    wrap_lons : bool
      Shift vector longitudes by -180,180 degrees to 0,360 degrees; Default = False

    Returns
    -------
    xarray.DataArray

    Examples
    --------
    >>> from xclim import subset
    >>> import xarray as xr
    >>> import geopandas as gpd
    >>> ds = xr.open_dataset('example.nc')
    >>> polys = gpd.read_file('regions.json')

    Get a mask from all polygons in 'regions.json'

    >>> mask = subset.create_mask(x_dim=ds.lon, y_dim=ds.lat, poly=polys)
    >>> ds = ds.assign_coords(regions=mask)

    Operations can be applied to each regions with `groupby`. Ex:

    >>> ds = ds.groupby('regions').mean()

    Extra step to retrieve the names of those polygons stored in the "id" column

    >>> region_names = xr.DataArray(polys.id, dims=('regions',))
    >>> ds = ds.assign_coords(regions_names=region_names)
    """
    # Warn about overlapping geometries: the spatial join below keeps only the
    # first match, so points in an overlap are assigned to the earlier feature.
    for i, (inda, pola) in enumerate(poly.iterrows()):
        for indb, polb in poly.iloc[i + 1:].iterrows():
            if pola.geometry.intersects(polb.geometry):
                warnings.warn(
                    f"List of shapes contains overlap between {inda} and {indb}. "
                    f"Points will be assigned to {inda}.",
                    UserWarning,
                    stacklevel=4,
                )

    # BUG FIX: the original condition used bitwise `&` between comparisons;
    # since `&` binds tighter than `==`, it evaluated
    # `len(x_dim.shape) == (1 & len(y_dim.shape)) == 1` rather than the
    # intended conjunction. Use logical `and`.
    if len(x_dim.shape) == 1 and len(y_dim.shape) == 1:
        # 1-D coordinate vectors: expand to a 2-D grid of lon/lat values.
        lon1, lat1 = np.meshgrid(
            np.asarray(x_dim.values), np.asarray(y_dim.values), indexing="ij"
        )
        dims_out = x_dim.dims + y_dim.dims
        coords_out = dict()
        coords_out[dims_out[0]] = x_dim.values
        coords_out[dims_out[1]] = y_dim.values
    else:
        # Curvilinear grid: coordinates are already 2-D arrays.
        lon1 = x_dim.values
        lat1 = y_dim.values
        dims_out = x_dim.dims
        coords_out = x_dim.coords

    # Create a pandas DataFrame of grid-cell centroids as shapely Points.
    df = pd.DataFrame({
        "id": np.arange(0, lon1.size),
        "lon": lon1.flatten(),
        "lat": lat1.flatten()
    })
    df["Coordinates"] = list(zip(df.lon, df.lat))
    df["Coordinates"] = df["Coordinates"].apply(Point)

    # Create a GeoDataFrame (spatially referenced with shifted longitude
    # values if needed).
    if wrap_lons:
        shifted = fiocrs.from_string(
            "+proj=longlat +ellps=WGS84 +lon_wrap=180 +datum=WGS84 +no_defs")
        gdf_points = gpd.GeoDataFrame(df, geometry="Coordinates", crs=shifted)
    else:
        gdf_points = gpd.GeoDataFrame(df, geometry="Coordinates",
                                      crs=fiocrs.from_epsg(4326))

    # Spatially join geodata points with region polygons and remove duplicates
    # (a point intersecting several polygons keeps its first match only).
    point_in_poly = gpd.tools.sjoin(gdf_points, poly, how="left",
                                    op="intersects")
    point_in_poly = point_in_poly.loc[
        ~point_in_poly.index.duplicated(keep="first")]

    # Extract polygon ids for points and reshape back to the 2-D grid.
    mask = point_in_poly["index_right"]
    mask_2d = np.array(mask).reshape(lat1.shape[0], lat1.shape[1])
    mask_2d = xarray.DataArray(mask_2d, dims=dims_out, coords=coords_out)
    return mask_2d
def get_crs(layerDict):
    """Return the most common CRS among the given layers as a PROJ.4 dict.

    layerDict maps layer names to dicts holding a 'file' path; each file is
    opened with fiona and its CRS serialized to a PROJ.4 string so the
    strings can be counted.
    """
    crs_strings = []
    for layer_name in layerDict:
        with fiona.open(layerDict[layer_name]['file']) as layer:
            crs_strings.append(to_string(layer.crs))
    # most_common(1) yields [(string, count)]; keep only the string.
    winner, _count = Counter(crs_strings).most_common(1)[0]
    return from_string(winner)
LOGGER.debug( '\n\nEPSG lookup found {} matches. Attempting to refine by comparing proj4 strings' .format(len(jres['codes']))) for i in reversed(range(len(jres['codes']))): epsg = None tmpSrs = osr.SpatialReference() res = tmpSrs.ImportFromEPSG( int(jres['codes'][i]['code'])) if res != 0: raise RuntimeError( repr(res) + ': could not import from EPSG') # create a dictionary mapping using fiona.crs.from_string to ensure elements are in # the same order. tmpProj4Dict = from_string(tmpSrs.ExportToProj4()) if from_string( osr_srs.ExportToProj4()) == tmpProj4Dict: epsg = jres['codes'][i]['code'] else: # remove towgs84 value if all 0's as it is not always implemented yet for gda2020 if 'towgs84' in tmpProj4Dict: if tmpProj4Dict['towgs84'] == '0,0,0,0,0,0,0': del tmpProj4Dict['towgs84'] if from_string( osr_srs.ExportToProj4()) == tmpProj4Dict: epsg = jres['codes'][i]['code'] if epsg is None:
def df2shp(dataframe, shpname, geo_column='geometry', index=False,
           prj=None, epsg=None, proj4=None, crs=None):
    '''
    Write a DataFrame to a shapefile.

    dataframe : DataFrame to write to shapefile.
    shpname : output shapefile path.
    geo_column : optional column containing geometry to write - default is 'geometry'.
    index : if True, write out the dataframe index as a column.

    --->there are four ways to specify the projection....choose one
    prj : <file>.prj filename (string)
    epsg : EPSG identifier (integer)
    proj4 : pyproj style projection string definition
    crs : crs attribute (dictionary) as read by fiona
    '''
    df = dataframe.copy()  # make a copy so the supplied dataframe isn't edited

    # reassign geometry column if geo_column is special (e.g. something other
    # than "geometry")
    if geo_column != 'geometry':
        df['geometry'] = df[geo_column]
        df.drop(geo_column, axis=1, inplace=True)

    # assign None for geometry, to write a dbf file from the dataframe
    if 'geometry' not in df.columns:
        df['geometry'] = None

    # include index in shapefile as an attribute field
    if index:
        if df.index.name is None:
            df.index.name = 'index'
        df[df.index.name] = df.index

    # enforce the 10-character shapefile field-name limit (otherwise fiona
    # marks it zero); somewhat kludgey, but handles duplicates up to 99
    df.columns = list(map(str, df.columns))  # some columns may be ints
    overtheline = [(i, '{}{}'.format(c[:8], i))
                   for i, c in enumerate(df.columns) if len(c) > 10]
    newcolumns = list(df.columns)
    for i, c in overtheline:
        newcolumns[i] = c
    df.columns = newcolumns

    properties = shp_properties(df)
    del properties['geometry']

    # sort the dataframe columns (so that properties coincide)
    # BUG FIX: the original called df.sort(axis=1); DataFrame.sort() was
    # deprecated in pandas 0.17 and removed in 0.20 — sort_index(axis=1)
    # is the documented replacement for sorting by column labels.
    df = df.sort_index(axis=1)

    # set projection (or use a prj file, which must be copied after the shp
    # is written); alternatively, provide a crs in dictionary form as read
    # using fiona from a shapefile, like fiona.open(inshpfile).crs
    if epsg is not None:
        from fiona.crs import from_epsg
        crs = from_epsg(int(epsg))
    elif proj4 is not None:
        from fiona.crs import from_string
        crs = from_string(proj4)
    elif crs is not None:
        pass
    else:
        pass

    # infer the output geometry type from the first row (None for dbf-only)
    if df.iloc[0]['geometry'] is not None:
        Type = df.iloc[0]['geometry'].type
    else:
        Type = None
    schema = {'geometry': Type, 'properties': properties}
    length = len(df)

    props = df.drop('geometry', axis=1).to_dict(orient='records')
    mapped = [mapping(g) for g in df.geometry]

    print('writing {}...'.format(shpname))
    with fiona.collection(shpname, "w", driver="ESRI Shapefile",
                          crs=crs, schema=schema) as output:
        for i in range(length):
            output.write({'properties': props[i], 'geometry': mapped[i]})
    if prj is not None:
        """
        if 'epsg' in prj.lower():
            epsg = int(prj.split(':')[1])
            prjstr = getPRJwkt(epsg).replace('\n', '') # get rid of any EOL
            ofp = open("{}.prj".format(shpname[:-4]), 'w')
            ofp.write(prjstr)
            ofp.close()
        """
        try:
            print('copying {} --> {}...'.format(
                prj, "{}.prj".format(shpname[:-4])))
            shutil.copyfile(prj, "{}.prj".format(shpname[:-4]))
        except IOError:
            print('Warning: could not find specified prj file. shp will not be projected.')
""" Created on Tue May 11 01:56:26 2021 @author: user """ import pandas as pd import geopandas as gpd from shapely.geometry import Point, MultiPoint from shapely.geometry import MultiPolygon, JOIN_STYLE import matplotlib.pyplot as plt from tqdm import tqdm from fiona.crs import from_string from pyproj import CRS epsg5181_qgis = from_string("+proj=tmerc +lat_0=38 +lon_0=127 +k=1 +x_0=200000 +y_0=500000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs") school=pd.read_csv(r'D:\부동산 빅데이터 분석 스터디\new\서울특별시 학교 기본정보.csv', encoding='cp949') school=school.drop_duplicates(subset='표준학교코드') school=school[~school['학교명'].str.contains('미사용')] school=school[school['학교종류명'].str.contains('초등학교|중학교|고등학교')] school=school[(school['학교종류명']=='초등학교') | (school['학교종류명']=='중학교') | (school['학교종류명']=='고등학교')] school=school[school['관할조직명']!='교육부'] def find_xy(searching) : url= "https://dapi.kakao.com/v2/local/search/keyword.json?query={}".format(searching) headers={"Authorization": "KakaoAK 1f26ccd78d132c1a8df33f46e92cabce"} places=requests.get(url,headers=headers).json()['documents'] try :
def __init__(self, in_proj):
    """Store the input projection string as a fiona CRS mapping."""
    # Local import keeps fiona optional until an instance is created.
    from fiona.crs import from_string
    self.in_crs = from_string(in_proj)
# Walkthrough of basic fiona collection usage against a sample shapefile.
from pprint import pprint

# Collections support random access by index; print the second feature.
with fiona.open('/gdata/world_borders.shp') as src:
    pprint(src[1])

###############################################################################
# The collection is closed when the `with` block exits, even on error.
try:
    with fiona.open('/gdata/world_borders.shp') as c:
        print(len(list(c)))
        # assert True is False
except:  # NOTE(review): bare except kept as in the original demo code
    print(c.closed)
    raise

###############################################################################
import fiona
c = fiona.open('/gdata/world_borders.shp')
c.driver  # driver name, e.g. 'ESRI Shapefile'
###############################################################################
c.crs  # coordinate reference system as a PROJ.4-style dict
###############################################################################
# Serialize a CRS dict to a PROJ.4 string.
from fiona.crs import to_string
print(to_string(c.crs))
###############################################################################
# Parse a PROJ.4 string into a CRS dict.
from fiona.crs import from_string
from_string("+datum=WGS84 +ellps=WGS84 +no_defs +proj=longlat")
###############################################################################
# Build a CRS dict from an EPSG code.
from fiona.crs import from_epsg
from_epsg(3857)
###############################################################################
len(c)  # number of features in the collection
###############################################################################
c.bounds  # (minx, miny, maxx, maxy) extent of the collection
import os #import logging #from pyproj import Proj, transform import configparser config = configparser.ConfigParser() config.read('../app.conf') from mongoengine.connection import connect,disconnect connection=connect(config["MONGODB"]["DB_NAME"]) from hotelapp.models import TelefonicaMap,Property #features = [] crs = None with fiona.collection(config["DEFAULT"]["DC_BASE_DIR"]+"data/telefonica/MGrid_WKT_Bizkaia_WGS84/MGrid_WKT_Bizkaia_WGS84.shp", "r") as source: crsdict = from_string("+datum=WGS84 +ellps=WGS84 +no_defs +proj=longlat") for feat in source: #feat['properties'].update(...) # with your attributes #print(feat) #features.append(feat) tm=TelefonicaMap() tm.geometry=feat['geometry'] tm.type=feat['type'] tm.featureID=int(feat['id']) tm.save() prop=Property() prop.WKT=feat['properties']['WKT'] prop.cell_id=int(float(feat['properties']['field_1'])) prop.field_2=int(feat['properties']['field_2']) prop.field_3=float(feat['properties']['field_3'])
def df2shp(dataframe, shpname, geo_column='geometry', index=False,
           retain_order=False,
           prj=None, epsg=None, proj4=None, crs=None):
    '''
    Write a DataFrame to a shapefile.

    dataframe : DataFrame to write to shapefile.
    shpname : output shapefile path.
    geo_column : optional column containing geometry to write - default is 'geometry'.
    index : if True, write out the dataframe index as a column.
    retain_order : boolean
        Retain column order in dataframe, using an OrderedDict. Shapefile will
        take about twice as long to write, since OrderedDict output is not
        supported by the pandas DataFrame object.

    --->there are four ways to specify the projection....choose one
    prj : <file>.prj filename (string)
    epsg : EPSG identifier (integer)
    proj4 : pyproj style projection string definition
    crs : crs attribute (dictionary) as read by fiona
    '''
    # first check if output path exists
    if os.path.split(shpname)[0] != '' and not os.path.isdir(
            os.path.split(shpname)[0]):
        raise IOError("Output folder doesn't exist")
    # check for empty dataframe
    if len(dataframe) == 0:
        raise IndexError("DataFrame is empty!")
    df = dataframe.copy()  # make a copy so the supplied dataframe isn't edited
    # reassign geometry column if geo_column is special (e.g. something other
    # than "geometry")
    if geo_column != 'geometry':
        df['geometry'] = df[geo_column]
        df.drop(geo_column, axis=1, inplace=True)
    # assign None for geometry, to write a dbf file from the dataframe
    Type = None
    if 'geometry' not in df.columns:
        df['geometry'] = None
        Type = 'None'
        mapped = [None] * len(df)
    # reset the index to an integer index to enforce ordering;
    # retain the old index as an attribute field if index=True
    df.reset_index(inplace=True, drop=not index)
    # enforce the 10-character shapefile field-name limit (otherwise fiona
    # marks it zero); somewhat kludgey, but handles duplicates up to 99
    df.columns = list(map(
        str, df.columns))  # convert columns to strings in case some are ints
    overtheline = [(i, '{}{}'.format(c[:8], i))
                   for i, c in enumerate(df.columns) if len(c) > 10]
    newcolumns = list(df.columns)
    for i, c in overtheline:
        newcolumns[i] = c
    df.columns = newcolumns
    properties = shp_properties(df)
    del properties['geometry']
    # set projection (or use a prj file, which must be copied after the shp
    # is written); alternatively, provide a crs in dictionary form as read
    # using fiona from a shapefile like fiona.open(inshpfile).crs
    if epsg is not None:
        from fiona.crs import from_epsg
        crs = from_epsg(int(epsg))
    elif proj4 is not None:
        from fiona.crs import from_string
        crs = from_string(proj4)
    elif crs is not None:
        pass
    else:
        pass
    if Type != 'None':
        # infer the output geometry type from the first row with a usable
        # geometry (rows with bad/missing geometry are skipped)
        for g in df.geometry:
            try:
                Type = g.type
            except:
                continue
        mapped = [mapping(g) for g in df.geometry]
    schema = {'geometry': Type, 'properties': properties}
    length = len(df)
    # OrderedDict records preserve column order but are slower to produce,
    # since pandas' to_dict path doesn't support them directly
    if not retain_order:
        props = df.drop('geometry',
                        axis=1).astype(object).to_dict(orient='records')
    else:
        props = [
            OrderedDict(r)
            for i, r in df.drop('geometry', axis=1).astype(object).iterrows()
        ]
    print('writing {}...'.format(shpname))
    with fiona.collection(shpname, "w", driver="ESRI Shapefile",
                          crs=crs, schema=schema) as output:
        for i in range(length):
            output.write({'properties': props[i], 'geometry': mapped[i]})
    if prj is not None:
        """
        if 'epsg' in prj.lower():
            epsg = int(prj.split(':')[1])
            prjstr = getPRJwkt(epsg).replace('\n', '') # get rid of any EOL
            ofp = open("{}.prj".format(shpname[:-4]), 'w')
            ofp.write(prjstr)
            ofp.close()
        """
        try:
            print('copying {} --> {}...'.format(
                prj, "{}.prj".format(shpname[:-4])))
            shutil.copyfile(prj, "{}.prj".format(shpname[:-4]))
        except IOError:
            print(
                'Warning: could not find specified prj file. shp will not be projected.'
            )
def crs(self):
    """Return the fiona CRS mapping built from this object's PROJ.4 string."""
    proj4_string = self.proj4
    return from_string(proj4_string)