Example #1
def test_remove_layer(tmpdir):
    filename = str(tmpdir.join("a_filename.gpkg"))
    create_sample_data(filename, "GPKG", layer="layer1")
    create_sample_data(filename, "GPKG", layer="layer2")
    create_sample_data(filename, "GPKG", layer="layer3")
    create_sample_data(filename, "GPKG", layer="layer4")
    assert fiona.listlayers(filename) == ["layer1", "layer2", "layer3", "layer4"]
    
    # remove by index
    fiona.remove(filename, layer=2)
    assert fiona.listlayers(filename) == ["layer1", "layer2", "layer4"]
    
    # remove by name
    fiona.remove(filename, layer="layer2")
    assert fiona.listlayers(filename) == ["layer1", "layer4"]
    
    # remove by negative index
    fiona.remove(filename, layer=-1)
    assert fiona.listlayers(filename) == ["layer1"]
    
    # invalid layer name
    with pytest.raises(ValueError):
        fiona.remove(filename, layer="invalid_layer_name")
    
    # invalid layer index
    with pytest.raises(DatasetDeleteError):
        fiona.remove(filename, layer=999)
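Note: create_sample_data is a test helper, not part of fiona. A minimal sketch of what it might look like, assuming GeoJSON-like point features (only the name and call signature come from the tests above; the body is hypothetical):

import fiona

def create_sample_data(filename, driver, layer=None):
    # Hypothetical helper: write a single point feature to the given layer.
    schema = {"geometry": "Point", "properties": {"id": "int"}}
    with fiona.open(filename, "w", driver=driver, schema=schema,
                    crs="EPSG:4326", layer=layer) as dst:
        dst.write({
            "geometry": {"type": "Point", "coordinates": (0.0, 0.0)},
            "properties": {"id": 1},
        })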
Example #2
    def inspect(self, input_path):

        archive = "tar" if tarfile.is_tarfile(input_path) else "zip" if zipfile.is_zipfile(input_path) else None
        if archive:
            archive_location = "{f}:///{path}".format(f=archive, path=input_path)
            layer_names = fiona.listlayers("", vfs=archive_location)
            layers = [fiona.open(layer_name, vfs=archive_location) for layer_name in layer_names]

        else:
            layer_names = fiona.listlayers(input_path)
            layers = [fiona.open(input_path, layer=layer_name) for layer_name in layer_names]

        return [layer.meta for layer in layers]
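The vfs keyword used above is deprecated in recent fiona releases (Example #12 below expects a FionaDeprecationWarning for it). A rough modern sketch, assuming fiona's URI-style archive paths ("zip://..." and "tar://..."), which also closes each layer instead of leaking open collections:

import fiona
import tarfile
import zipfile

def inspect(input_path):
    # Build a "tar://..." or "zip://..." URI when the input is an archive.
    if tarfile.is_tarfile(input_path):
        uri = "tar://" + input_path
    elif zipfile.is_zipfile(input_path):
        uri = "zip://" + input_path
    else:
        uri = input_path
    metas = []
    for name in fiona.listlayers(uri):
        with fiona.open(uri, layer=name) as layer:
            metas.append(layer.meta)
    return metas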
Example #3
File: mixins.py Project: wq/wq.io
def load(self):
    try:
        self.layers = fiona.listlayers(self.filename)
    except (ValueError, IOError):
        driver = guess_driver(self.filename)
        self.meta = {'driver': driver}
        self.empty_file = True
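guess_driver comes from wq.io and is not shown here. A plausible extension-based stand-in (an assumption for illustration, not the library's actual implementation):

import os

# Hypothetical mapping from file extension to OGR driver name.
DRIVER_MAP = {
    '.shp': 'ESRI Shapefile',
    '.json': 'GeoJSON',
    '.geojson': 'GeoJSON',
    '.gpkg': 'GPKG',
}

def guess_driver(filename):
    ext = os.path.splitext(filename)[1].lower()
    return DRIVER_MAP.get(ext, 'ESRI Shapefile')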
Example #4
def read(fp, prop_map):
    """Read shapefile.

    :param fp: file-like object
    """
    layers = fiona.listlayers('/', vfs='zip://' + fp.name)

    if not layers:
        raise IOError

    filename = '/' + layers[0] + '.shp'

    with fiona.open(filename, vfs='zip://' + fp.name) as source:
        collection = {
            'type': 'FeatureCollection',
            'features': [],
            'bbox': [float('inf'), float('inf'), float('-inf'), float('-inf')]
        }

        for rec in source:
            transformed = _transformer(source.crs, rec)
            transformed['properties'] = {
                key: str(transformed['properties'][value])
                for key, value in prop_map.items()
            }
            collection['bbox'] = [
                comparator(values)
                for comparator, values in zip(
                    [min, min, max, max],
                    zip(collection['bbox'], _bbox(transformed))
                )
            ]
            collection['features'].append(transformed)

    return collection
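_transformer and _bbox are private helpers not shown above. _bbox evidently reduces a record's geometry to (minx, miny, maxx, maxy); a sketch of that behavior using shapely (an assumption, since the original is unavailable):

from shapely.geometry import shape

def _bbox(rec):
    # shapely's bounds property already returns (minx, miny, maxx, maxy).
    return shape(rec['geometry']).bounds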
Example #5
def test_remove_layer(tmpdir):
    filename = str(tmpdir.join("a_filename.gpkg"))
    create_sample_data(filename, "GPKG", layer="layer1")
    create_sample_data(filename, "GPKG", layer="layer2")
    assert fiona.listlayers(filename) == ["layer1", "layer2"]

    result = CliRunner().invoke(main_group, [
        "rm",
        filename,
        "--layer", "layer2",
        "--yes"
    ])
    print(result.output)
    assert result.exit_code == 0
    assert os.path.exists(filename)
    assert fiona.listlayers(filename) == ["layer1"]
Example #6
def test_pg2gpkg_update(self):
    db = DB
    db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10', driver='GPKG',
              outfile=os.path.join(self.tempdir, 'test_dump.gpkg'),
              outlayer='bc_airports')
    db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10', driver='GPKG',
              outfile=os.path.join(self.tempdir, 'test_dump.gpkg'),
              outlayer='bc_airports_2')
    layers = fiona.listlayers(os.path.join(self.tempdir, 'test_dump.gpkg'))
    assert len(layers) == 2
Example #7
File: ls.py Project: perrygeo/Fiona
def ls(ctx, input, indent):

    """
    List layers in a datasource.
    """

    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2

    with fiona.drivers(CPL_DEBUG=verbosity > 2):
        result = fiona.listlayers(input)
        click.echo(json.dumps(result, indent=indent))
Example #8
File: cli.py Project: nerik/gj2ascii
def _cb_infile(ctx, param, value):

    """
    Click callback to validate infile. Let the user specify a datasource and its
    layers in a single argument.

    Example usage:

        Render all layers in a datasource
        $ gj2ascii sample-data/multilayer-polygon-line

        Render layer with name 'polygons' in a multilayer datasource
        $ gj2ascii sample-data/multilayer-polygon-line,polygons

        # Render two layers in a specific order in a multilayer datasource
        $ gj2ascii sample-data/multilayer-polygon-line,lines,polygons

        # Render layers from multiple files
        $ gj2ascii sample-data/polygons.geojson sample-data/multilayer-polygon-line,lines

    Example output:

        [('sample-data/multilayer-polygon-line', ['polygons', 'lines'])]

        [('sample-data/multilayer-polygon-line', ['polygons'])]

        [('sample-data/multilayer-polygon-line', ['lines', 'polygons'])]

        [
            ('sample-data/polygons.geojson', ['polygons']),
            ('sample-data/multilayer-polygon-line', ['lines'])
        ]

    Returns
    -------
    list
        A list of tuples where the first element of each tuple is the datasource
        and the second is a list of layers to render.
    """

    output = []
    for ds_layers in value:
        _split = ds_layers.split(',')
        ds = _split[0]
        layers = _split[1:]
        if ds != '-' and (len(layers) == 0 or '%all' in layers):
            layers = fio.listlayers(ds)
        elif ds == '-':
            layers = [None]
        output.append((ds, layers))

    return output
Example #9
def unpack_layers(username, project_name):
    try:
        # TODO: This still uses the file system
        user, project = Project.get_user_and_project(username, project_name)
        clear_uploads(SHP_DIR)
        zip_contents = BytesIO(project.crane_project.zipfile.read())
        unique_name = str(uuid.uuid1())
        # Avoid shadowing the zipfile module with a local variable.
        zip_path = SHP_DIR + '/' + unique_name + '.zip'
        with open(zip_path, "wb") as f:
            f.write(zip_contents.getvalue())
        # TODO: keep track of the data types.
        messages = []
        project.crane_project.status = "Reading shapefiles."
        project.save(cascade=True)
        with fiona.drivers():
            for i, layername in enumerate(
                    fiona.listlayers('/', vfs='zip://' + zip_path)):
                feature = GeoFeat()
                feature.read_shapefile(layername, zip_path)
                feature.name = layername
                # TODO: This silently drops unreadable layers from the
                # project; report this to the user.
                try:
                    feature.save()
                    project.crane_project.features.append(feature)
                except Exception as e:
                    messages.append(layername + ' not saved, reason: ' + str(e))
                    continue
        # TODO: These two calls might be redundant; check whether that is so.
        project.crane_project.status = "Shapefiles stored. User needs to enter Interpretations"
        print(messages)
        project.save(cascade=True)
        return "Layers stored"
    except Exception as e:
        project.crane_project.status = "Error unpacking layers"
        project.crane_project.messages = "Error unpacking layers: " + str(e)
        project.save(cascade=True)
        return
Example #10
def test_directory_trailing_slash():
    assert fiona.listlayers('tests/data/') == ['coutwildrnp']
Example #11
def test_single_file():
    assert fiona.listlayers('tests/data/coutwildrnp.shp') == ['coutwildrnp']
Example #12
def test_zip_path_arch(path_coutwildrnp_zip):
    vfs = 'zip://{}'.format(path_coutwildrnp_zip)
    with pytest.warns(FionaDeprecationWarning):
        assert fiona.listlayers('/coutwildrnp.shp', vfs=vfs) == ['coutwildrnp']
Example #13
def test_directory_trailing_slash(data_dir):
    assert fiona.listlayers(data_dir) == ['coutwildrnp']
Example #14
def test_zip_path_arch():
    assert fiona.listlayers("/test_uk.shp", vfs="zip://docs/data/test_uk.zip") == ["test_uk"]
Example #15
def test_single_file():
    assert fiona.listlayers("docs/data/test_uk.shp") == ["test_uk"]
Example #16
def test_directory():
    assert fiona.listlayers('docs/data') == ['test_uk']
Example #17
def test_invalid_vfs():
    with pytest.raises(TypeError):
        fiona.listlayers("/", vfs=1)
Example #18
def test_invalid_path():
    with pytest.raises(TypeError):
        fiona.listlayers(1)
Example #19
def test_directory_trailing_slash(data_dir):
    assert sorted(fiona.listlayers(data_dir)) == ['coutwildrnp', 'gre', 'test_tin']
Example #20
import geopandas as gpd
import pandas as pd
import fiona
import glob
from shapely.geometry import Point, LineString, Polygon
import matplotlib
from rasterstats import zonal_stats
import random
import numpy as np
random.seed(0)


# In[2]:


shed_files = fiona.listlayers(r'./lab3.gpkg')
sheds = []
for files in shed_files:
    if "wdbhuc" in files:
        sheds.append(files)
sample_points = {'point_id': [], 'geometry': [], 'HUC': []}
for polys in sheds:
    shed_files_gdf = gpd.read_file(r'./lab3.gpkg', layer=polys)
    huccode = [f for f in shed_files_gdf.columns if 'HUC' in f][0]
    for idx, row in shed_files_gdf.iterrows():
        j = 0
        extent = row['geometry'].bounds
        area_km = row["Shape_Area"]/1000000
        n = (int(round(area_km*0.05)))
        while j < n:
            x = random.uniform(extent[0], extent[2])
Example #21
def test_listing_pathobj(path_coutwildrnp_json):
    """list layers from a Path object"""
    pathlib = pytest.importorskip("pathlib")
    assert len(fiona.listlayers(pathlib.Path(path_coutwildrnp_json))) == 1
Example #22
def test_listing_file(path_coutwildrnp_json):
    """list layers from an open file object"""
    with open(path_coutwildrnp_json, "rb") as f:
        assert len(fiona.listlayers(f)) == 1
Example #23
def test_path_object(path_coutwildrnp_shp):
    path_obj = Path(path_coutwildrnp_shp)
    assert fiona.listlayers(path_obj) == ['coutwildrnp']
Example #24
def test_zip_path_arch():
    assert fiona.listlayers('/coutwildrnp.shp', vfs='zip://tests/data/coutwildrnp.zip') == ['coutwildrnp']
Example #25
def test_invalid_path_ioerror():
    with pytest.raises(DriverError):
        fiona.listlayers("foobar")
Example #26
        raise ValueError('No file geodatabases found at this location.')

# For all except gpkg+split, fiona is not needed.
for gdb in gdb_paths:
    theme = os.path.splitext(os.path.basename(gdb))[0]
    print("Converting {}".format(theme))

    # gdb to gpkg
    if args.format == 'GPKG' and not args.split:
        subprocess.run(["ogr2ogr", "-f", args.format] + opts_ogr2ogr +
                       opts_gpkg + [os.path.join(dest_path, theme + ".gpkg")] +
                       [gdb])

    # gdb to multiple gpkgs
    elif args.format == 'GPKG' and args.split:
        for layer in fiona.listlayers(gdb):
            print("Extracting {}".format(layer))
            subprocess.run(["ogr2ogr", "-f", args.format] + opts_ogr2ogr +
                           opts_gpkg +
                           [os.path.join(dest_path, layer + ".gpkg")] + [gdb] +
                           [layer])

    # gdb to one postgres schema
    elif args.format == 'PostgreSQL':
        subprocess.run(["ogr2ogr", "-f", args.format] + opts_ogr2ogr +
                       opts_pg + [args.dest] + [gdb])

    # # gdb to multiple postgres schemas
    # elif args.format == 'PostgreSQL' and args.split:
    #     pgconn = re.sub(r"active_schema=(\w+)",
    #                     "active_schema=" + theme, args.dest)
Example #27
def test_zip_path():
    assert fiona.listlayers('zip://docs/data/test_uk.zip') == ['test_uk']
Example #28
def analyze_sandbox(experiments, district=True, export=True):
    print(f"Analyzing {experiments} sandboxes")
    for sandbox, value in experiments.items():
        proxy = Network(f'{sandbox} Sandbox',
                        crs=26910,
                        directory=f'{directory}Sandbox/{sandbox}',
                        nodes='network_intersections')
        db_layers = listlayers(proxy.gpkg)

        # Check if sandbox has links and intersections
        network = [
            'network_links', 'network_intersections', 'land_municipal_boundary'
        ]
        for layer in network:
            if layer not in db_layers:
                raise AttributeError(
                    f"{layer} not found in GeoPackage of {sandbox}")

        for code, year in experiments[sandbox][1].items():
            # Check if experiment has parcels and buildings
            built = [f'land_parcels_{code}', f'fabric_buildings_{code}']
            for layer in built:
                if layer not in db_layers:
                    raise AttributeError(
                        f"{layer} not found in GeoPackage of {sandbox}")

    # Perform network analysis
    na = {}
    for sandbox, value in experiments.items():
        na[f"{sandbox}"] = {}

        # Define geographic boundary
        proxy = Network(f'{sandbox} Sandbox',
                        crs=26910,
                        directory=f'{directory}Sandbox/{sandbox}',
                        nodes='network_intersections')

        # Transfer network indicators to sandbox
        proxy = proxy_network(proxy)

        # Extract elevation data
        proxy.node_elevation()

        for code, year in experiments[sandbox][1].items():
            if district:
                district_net = Network(experiments[sandbox][0], crs=26910)
                for layer in ['network_axial']:
                    overlay_radius(proxy.gpkg,
                                   district_net.gpkg,
                                   sample_layer=layer)

            # Calculate spatial indicators
            proxy = proxy_indicators(proxy, experiment={code: year})

            # Perform network analysis
            results = proxy.network_analysis(
                run=True,
                col_prefix='mob',
                file_prefix=f'mob_{code}',
                service_areas=radii,
                sample_gdf=gpd.read_file(proxy.gpkg,
                                         layer=f"land_parcels_{code}"),
                aggregated_layers=network_layers,
                keep=['OBJECTID', "population, 2016"],
                export=export)

            # Divide sums aggregations to a buffer overlay in order to avoid edge effects
            for col in results.columns:
                if '_sum_' in col:
                    results[col] = results[col] / results['divider']

            na[f"{sandbox}"][f"{code}"] = results
    return na
Example #29
def test_directory_trailing_slash():
    assert fiona.listlayers("docs/data/") == ["test_uk"]
Example #30
# class to index
def cl2idx(inputarr, dict_label):
    y_label = np.zeros((inputarr.shape[0]))
    for i in range(inputarr.shape[0]):
        y_label[i] = dict_label[inputarr[i, 0]]
    return y_label

#layers = fiona.listlayers("/Volumes/Meng_Mac/obia/temp/results2.gdb") # feb results
#layers = fiona.listlayers("/Volumes/Meng_Mac/obia/temp/results_0303.gdb") # better water 
#layers = fiona.listlayers("/Volumes/Meng_Mac/obia/temp/P_subv1_1.gdb") # better water 
recall_all = np.zeros(1)
precision_all= np.zeros(1)  
filename = "/Volumes/Meng_Mac/obia/temp/HP1103.gpkg"
resultdir2 = "/Users/menglu/Volumes/Meng_Mac/obia/temp/" # for saving precision and recall npy.
treedir = "/Users/menglu/Downloads"
layers = fiona.listlayers(filename)
layers
#filename = "/Volumes/Meng_Mac/obia/temp/HP1103.gpkg"

''' run 
# params
# j: iterate over layers
# k: iterate over classes 
# filename: directory of a file that can be read by geopandas
# seednum: number of CV iterations 
# threshold: classify as positive if higher than this probability
# resultdir: dir to save variable importance plots. if not provided, then varimp are not calculated

# return
# two numbers, recall and precision
# recall and precision are -1 if not calculated due to too few objects.  
Example #31
def test_single_file(path_coutwildrnp_shp):
    assert fiona.listlayers(path_coutwildrnp_shp) == ['coutwildrnp']
Example #32
              ax=ax)
ax.set_title("Arizona stream gauge drainge area\n (sq km)")
plt.show()

# %%
# adding more datasets
# https://www.usgs.gov/core-science-systems/ngp/national-hydrography/access-national-hydrography-products
# https://viewer.nationalmap.gov/basic/?basemap=b1&category=nhd&title=NHD%20View

# Example reading in a geodataframe
# Watershed boundaries for the lower colorado
filename2 = 'WBD_15_HU2_GDB.gdb'
filepath2 = os.path.join(r'data\Week10', filename2)
print(os.getcwd())
print(filepath2)
fiona.listlayers(filepath2)
HUC6 = gpd.read_file(filepath2, layer="WBDHU6")

# plot the new layer we got:
fig, ax = plt.subplots(figsize=(5, 5))
HUC6.plot(ax=ax)
ax.set_title("HUC Boundaries")
plt.show()

HUC6.crs

# %%
# Add some points
# UA:  32.22877495, -110.97688412
# STream gauge:  34.44833333, -111.7891667
point_list = np.array([[-110.97688412, 32.22877495],
Example #33
def test_zip_path_arch(path_coutwildrnp_zip):
    vfs = 'zip://{}'.format(path_coutwildrnp_zip)
    assert fiona.listlayers('/coutwildrnp.shp', vfs=vfs) == ['coutwildrnp']
Example #34
# Read in using geopandas
file = os.path.join('../data/GIS_files', 'gagesII_9322_sept30_2011.shp')
gages = gpd.read_file(file)

# Filter to only AZ gauges
gages.columns
gages.STATE.unique()
gages_AZ = gages[gages['STATE'] == 'AZ']

# Dataset 2: Watershed boundaries for the Lower Colorado
# Download WBD_15_HU2_GDB.gdb from USGS here:
# https://www.usgs.gov/core-science-systems/ngp/national-hydrography/access-national-hydrography-products
# https://viewer.nationalmap.gov/basic/?basemap=b1&category=nhd&title=NHD%20View
# Read in using geopandas
file = os.path.join('../data/GIS_files', 'WBD_15_HU2_GDB.gdb')
fiona.listlayers(file)
HUC6 = gpd.read_file(file, layer="WBDHU6")

# Filter to only Verde River Watershed
HUC6.columns
HUC6.name.unique()
HUC6_Verde = HUC6[HUC6['name'] == 'Verde']

# Dataset 3: Major rivers/streams
# Download USA Rivers and Streams from Esri here:
# https://hub.arcgis.com/datasets/esri::usa-rivers-and-streams?geometry=-115.952%2C31.858%2C-109.014%2C33.476
# Read in using geopandas
file = os.path.join('../data/GIS_files', 'USA_Rivers_and_Streams.shp')
rivers_USA = gpd.read_file(file)

# Filter to only AZ
Example #35
def ls(ctx, input, indent):
    """
    List layers in a datasource.
    """
    result = fiona.listlayers(input)
    click.echo(json.dumps(result, indent=indent))
Example #36
def test_single_file(path_coutwildrnp_shp):
    assert fiona.listlayers(path_coutwildrnp_shp) == ['coutwildrnp']
Example #37
def test_directory():
    assert fiona.listlayers('tests/data') == ['coutwildrnp']
Example #38
def test_directory(data_dir):
    assert fiona.listlayers(data_dir) == ['coutwildrnp', 'gre']
Example #39
def test_zip_path():
    assert fiona.listlayers('zip://tests/data/coutwildrnp.zip') == ['coutwildrnp']
Example #40
def test_directory_trailing_slash(data_dir):
    assert fiona.listlayers(data_dir) == ['coutwildrnp', 'gre']
Example #41
# Examine
import fiona

CSI_GDB = r"data/CSI_2014_Complete.gdb"
unique_fields = set()
count = 1

with open("csi_fields.md", 'w') as f:
    f.write("---\ntitle: Contaminant Source Inventories--Existing Schema\n")
    f.write("author: Eric Goddard\n")
    f.write("date: \\today\n")
    f.write("geometry: margin=1in\n---\n\n")
    f.write("##Layers\n")
    with fiona.drivers():
        for layername in fiona.listlayers(CSI_GDB):
            f.write("###{layer}\n".format(layer=layername))

            with fiona.open(CSI_GDB, layer=layername, driver="FileGDB") as src:
                unique_fields.update(key.upper() for key in \
                        src.schema['properties'].keys())
                field_num = 1
                for field in src.schema['properties']:
                    f.write("{f}. {field} ({type})\n".format(f=field_num,
                        field=field, type=src.schema['properties'][field]))
                    field_num += 1
                f.write("\n\n")


    f.write("## Unique Fields  \n")
    for field in sorted(unique_fields):
        f.write("{c}. {f}\n".format(c=count, f=field))
Example #42
def test_zip_path(path_coutwildrnp_zip):
    assert fiona.listlayers(
        'zip://{}'.format(path_coutwildrnp_zip)) == ['coutwildrnp']
Example #43
def test_single_file():
    assert fiona.listlayers('docs/data/test_uk.shp') == ['test_uk']
Example #44
def test_zip_path_arch(path_coutwildrnp_zip):
    vfs = 'zip://{}'.format(path_coutwildrnp_zip)
    assert fiona.listlayers('/coutwildrnp.shp', vfs=vfs) == ['coutwildrnp']
Example #45
def test_directory_trailing_slash():
    assert fiona.listlayers('docs/data/') == ['test_uk']
Example #46
def _historical(
    gdf: gpd.GeoDataFrame,
    others_path: Union[str, pathlib.Path],
    date_col: str,
    direction: str = "nearest",
    default_year: str = None,
    op: str = "intersection",
    field: str = None,
    return_source: bool = False,
) -> Union[pd.Series, tuple]:
    """
    Checks whether records in gdf intersect with features in other or if
    column values from gdf match with other column values after a spatial
    join for historical data. Supposing one has two or more layers for
    different years in time, this function will match each record with
    of those layers depending on the date of collection. Then, for each
    layer, it will compare the respective records by executing one of the
    check_intersect or check_match functions.

    Parameters
    ----------
    gdf : GeoDataFrame
        GeoDataFrame with records.
    others_path : str or Path
        Folder with shapefiles or GeoPackage file with historical data.
        Shapefile names or GeoPackage layer names must have a four-digit
        year anywhere in order to extract it and match it with the
        records' collection date.
    date_col : str
        Column name with the collection date.
    direction : str
        Whether to search for prior, subsequent, or closest years. Can be
         "backward", "nearest" or "forward".
    default_year : str
        Default year to take for records that do not have a collection
        date or whose collection date did not match with any year. Can be:

        - 'first': takes the earliest year in the historical data.
        - 'last': takes the latest year in the historical data.
        - None: skips a default year assignment. Keep in mind that
        records without a collection date won't be validated.
    op : str
        Operation to execute. Can be "intersection" to execute
        check_intersection or "match" to execute check_match.
    field : str
        Field to get from layers when `op` is "match".
    return_source : bool
        Whether to return a column with layer source.

    Returns
    -------
    result : pd.Series
        Extracted values.
    source : pd.Series
        Corresponding source. Only provided if return_source is True.

    """
    if not isinstance(others_path, pathlib.Path):
        others_path = pathlib.Path(others_path)

    if others_path.is_dir():
        layers = list(others_path.glob("*.shp"))
        if not layers:
            raise Exception("`others_path` must contain shapefiles.")
        input_type = "shp"
    else:
        if others_path.suffix == ".gpkg":
            layers = fiona.listlayers(others_path)
            input_type = "gpkg"
        else:
            raise ValueError("`others_path` must be a GeoPackage file.")

    years = list(map(_extract_year, layers))
    historical_year = _get_nearest_year(gdf[date_col],
                                        years,
                                        direction=direction,
                                        default_year=default_year)

    result = pd.Series(index=gdf.index, dtype="object")
    if return_source:
        source = pd.Series(index=gdf.index, dtype="object")

    for year in historical_year.dropna().unique():
        layer = layers[years.index(year)]
        if input_type == "shp":
            other = gpd.read_file(layer)
            year_source = layer.stem
        elif input_type == "gpkg":
            other = gpd.read_file(others_path, layer=layer)
            year_source = layer

        mask = historical_year == year
        year_gdf = gdf[mask]
        if op == "intersection":
            year_result = intersects_layer(year_gdf, other)
        elif op == "match":
            year_result = get_layer_field(year_gdf, other, field)
        else:
            raise ValueError("`op` must be either 'intersection' or 'match'.")

        result.loc[mask] = year_result
        if return_source:
            source.loc[mask] = year_source

    if return_source:
        return result, source
    else:
        return result
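_extract_year is referenced above but not shown. Per the docstring, shapefile or layer names must contain a four-digit year; a regex-based sketch under that assumption (the original implementation may differ):

import re

def _extract_year(layer):
    # Find a standalone four-digit run, e.g. "boundaries_2010" -> 2010.
    match = re.search(r"(?<!\d)\d{4}(?!\d)", str(layer))
    if not match:
        raise ValueError(f"No four-digit year found in {layer}.")
    return int(match.group(0))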
Example #47
def test_zip_path_arch():
    assert fiona.listlayers('/test_uk.shp', vfs='zip://docs/data/test_uk.zip') == ['test_uk']
Example #48
import fiona
import geopandas as gpd

gdb_file = "C:\\Data\\Projects_GIS\\2020\\001_a_GEODB\\BRK_Mapping_Pluvial_IT_US\\BRK_input.gdb"
# Get all the layers from the .gdb file
layers = fiona.listlayers(gdb_file)

for layer in layers:
    gdf = gpd.read_file(gdb_file, layer=layer)
    # Do stuff with the gdf
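A common follow-up is stacking every layer into a single frame. A sketch, assuming the layers share a CRS (layers with differing schemas simply produce NaN-filled columns):

import pandas as pd

gdfs = [
    gpd.read_file(gdb_file, layer=layer).assign(source_layer=layer)
    for layer in layers
]
combined = pd.concat(gdfs, ignore_index=True)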
Example #49
def test_directory():
    assert fiona.listlayers("docs/data") == ["test_uk"]
Example #50
import os
import pyepsg
import folium
import fiona
import numpy as np
from shapely.geometry import MultiLineString, mapping, shape
from folium import plugins
import math

from grade_data_process_functions import read_data

# 1.2 Set Global Parameters
path_to_data = (r"C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc"
                r"\Documents\Passive Projects\Freeval-PA\grade_data"
                r"\June_23_2020")
path_to_grade_data_file = os.path.join(path_to_data, "Processing.gdb")
fiona.listlayers(path_to_grade_data_file)

# 2 read data
# -----------------------------------------------------------------------------

grade_gdf = read_data(filename_gdf=path_to_grade_data_file,
                      layer_gdf="SpatialJoin_GradeDataFINAL")

grade_gdf_asc_sort = (
    grade_gdf.loc[lambda x: x.seg_no.astype(int) % 2 == 0].sort_values(
        by=["name", "fkey"], ascending=[True, True]).reset_index(drop=True))

grade_gdf_desc_sort = (
    grade_gdf.loc[lambda x: x.seg_no.astype(int) % 2 != 0].sort_values(
        by=["name", "fkey"], ascending=[True, False]).reset_index(drop=True))
Example #51
def test_zip_path():
    assert fiona.listlayers("zip://docs/data/test_uk.zip") == ["test_uk"]
Example #52
import numpy as np
import pandas as pd
import geopandas as gp
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score
import xgboost as xgb
import fiona


def cl2idx(inputarr, dict_label):
    y_label = np.zeros((inputarr.shape[0]))
    for i in range(inputarr.shape[0]):
        y_label[i] = dict_label[inputarr[i, 0]]
    return y_label


filedir = "/Users/menglu/Documents/GitHub/waaden/OBIA_RF/results"

layers = fiona.listlayers("/Volumes/Meng_Mac/obia/results2.gdb")


#j=1
def run(j):
    # joind = gp.read_file("/Volumes/Meng_Mac/obia/results2.gdb", layer = layers[j])
    joind = gp.read_file("/Users/menglu/Downloads/water_test_p2bwater.gdb",
                         layer=layers[j])

    classes = ["P1a1", "P1a2", "P2b", "P2c"]
    #H1    H2     O (1) P1a1 (4)  P1a2 (6)   P2b   P2c   S1a (0)   S1c    S2    S3
    df1 = pd.DataFrame(joind.drop(columns='geometry'))
    df1 = df1.replace([np.inf, -np.inf], np.nan).dropna()
    #classes

    # %% [code]
Example #53
import os

import fiona
import geopandas as gpd
import pandas as pd

data_dir = os.environ['hsfm_geomorph_data']

data_dir

# ## Open up KML Files
# Make sure to open all layers explicitly

# +
file_paths = ['NAGAP_1970s.kml', 'NAGAP_1980s.kml', 'NAGAP_1990s.kml']

df_list = []
for path in file_paths:
    path = os.path.join(data_dir, path)
    for layer in fiona.listlayers(path):
        try:
            df_list.append(gpd.read_file(path, driver='KML', layer=layer))
        except ValueError:
            pass
df = pd.concat(df_list)
# -

len(df)

df.head()

# Change CRS to Web Mercator for easy plotting

df = df.to_crs(epsg=3857)
Example #54
    """ Process Feature
    Args:
        f (dictionary) : input fiona feature.
    Returns:
        d (dictionary) : geometry converted to geoJSON and added to dict.    
    """
    geom = shape(f["geometry"])    
    wkt = geom.wkt
    f["geometry"] = wkt
    return f


# In[62]:

with fiona.drivers():
    for layername in fiona.listlayers(file_path_gpkg):
        with fiona.open(file_path_gpkg, layer=layername) as src:
            # src is an iterator and might get parallelized. 
            for f in src:
                d = process_feature(f)
            


Example #55
def test_directory(data_dir):
    assert fiona.listlayers(data_dir) == ['coutwildrnp']
Example #56
def export_edge_noise_csv(edge_noises: pd.DataFrame, out_dir: str):
    max_id = edge_noises[E.id_ig.name].max()
    csv_name = f'{max_id}_edge_noises.csv'
    edge_noises.to_csv(out_dir + csv_name)


if (__name__ == '__main__'):
    log = Logger(printing=True, log_file='noise_graph_join.log', level='debug')
    graph = ig_utils.read_graphml('data/hma.graphml')
    log.info(f'read graph of {graph.ecount()} edges')
    edge_gdf = ig_utils.get_edge_gdf(graph, attrs=[E.id_ig])
    edge_gdf = edge_gdf.sort_values(E.id_ig.name)

    # read noise data
    noise_layer_names = fiona.listlayers('data/noise_data_processed.gpkg')
    noise_layers = {
        name: gpd.read_file('data/noise_data_processed.gpkg', layer=name)
        for name in noise_layer_names
    }
    noise_layers = {
        name: gdf.rename(columns={'db_low': name})
        for name, gdf in noise_layers.items()
    }
    log.info(f'read {len(noise_layers)} noise layers')

    # read nodata zone: narrow area between noise surfaces of different municipalities
    nodata_layer = gpd.read_file('data/extents.gpkg',
                                 layer='municipal_boundaries')
Example #57
def test_zip_path(path_coutwildrnp_zip):
    assert fiona.listlayers(
        'zip://{}'.format(path_coutwildrnp_zip)) == ['coutwildrnp']
Example #58
warnings.filterwarnings("ignore", category=FutureWarning)


def cl2idx(inputarr, dict_label):
    y_label = np.zeros((inputarr.shape[0]))
    for i in range(inputarr.shape[0]):
        y_label[i] = dict_label[inputarr[i, 0]]
    return y_label


filedir = "/Users/menglu/Documents/GitHub/waaden/OBIA_RF/results"
resultdir = "/Users/menglu/Volumes/Meng_Mac/obia/results/"

# %% [code]

# Only the last assignment takes effect; earlier datasets kept as alternatives.
# layers = fiona.listlayers("/Volumes/Meng_Mac/obia/temp/results2.gdb")  # feb results
# layers = fiona.listlayers("/Volumes/Meng_Mac/obia/temp/results_0303.gdb")  # better water
# layers = fiona.listlayers("/Volumes/Meng_Mac/obia/temp/P_subv1_1.gdb")  # better water
layers = fiona.listlayers("/Volumes/Meng_Mac/obia/temp/HP1103.gpkg")
layers
j = 0


#j for layers and k for classes
def run(k,
        j=0,
        filename="/Volumes/Meng_Mac/obia/temp/HP1103.gpkg",
        resultdir="/Volumes/Meng_Mac/obia/results/"):
Example #59
def test_layer_index():
    layer = fiona.listlayers(DATA).index('polygons')
    assert list(read_features(DATA, layer=layer)) == target_features
Example #60
#------------------------------------------------------------------------------
# Read geodatabase
#------------------------------------------------------------------------------

import geopandas as gpd
import pandas as pd
import fiona
from bokeh.plotting import figure
from bokeh.io import output_notebook, show

fin=r"E:\Data\ForestInventory\PSP-NADB\Data\01_RawFiles\QC\Release_2017-07\Pep\PEP.gdb"

fiona.listlayers(fin)

dat=gpd.read_file(fin, layer='PLACETTE_MES')
sorted(list(dat.columns.values))
dat.to_csv(r"E:\Data\ForestInventory\PSP-NADB\Data\01_RawFiles\QC\Release_2017-07\PLACETTE_MES.csv")

dat=gpd.read_file(fin, layer='PLACETTE')
sorted(list(dat.columns.values))
dat.to_csv(r"E:\Data\ForestInventory\PSP-NADB\Data\01_RawFiles\QC\Release_2017-07\PLACETTE.csv")

dat=gpd.read_file(fin, layer='STATION_PE')
sorted(list(dat.columns.values))
dat.to_csv(r"E:\Data\ForestInventory\PSP-NADB\Data\01_RawFiles\QC\Release_2017-07\STATION_PE.csv")

dat=gpd.read_file(fin, layer='DENDRO_ARBRES')
sorted(list(dat.columns.values))
dat.to_csv(r"E:\Data\ForestInventory\PSP-NADB\Data\01_RawFiles\QC\Release_2017-07\DENDRO_ARBRES.csv") # , header=False
labels = pd.Series(dat.columns.values)
labels.to_csv(r"E:\Data\ForestInventory\PSP-NADB\Data\01_RawFiles\QC\Release_2017-07\DENDRO_ARBRES_lab.csv")