Code example #1
}

# names of statistical variables
metrics = "sum mean std".split()

# Specify columns to join from the shapefile
shp_cols = shp.drop(['geometry'], axis=1)

# Read rasters and join names with path
rasters = {}

for n, d in dirs.items():
    rasters[n] = os.path.join(d, n + ".tif")


# Start from a copy of the shapefile attribute columns
spfeas_stats = shp_cols.copy()

# Calculate zonal statistics for each raster and join the results
for rast, path in rasters.items():
    print(rast)
    stats = zs(shp, path, nodata=-999, stats=metrics)
    new_colnames = ["{}_{}".format(rast, metric) for metric in metrics]
    df = pd.DataFrame(stats)
    df2 = df.rename(columns=dict(zip(metrics, new_colnames)))
    spfeas_stats = spfeas_stats.join(df2)

# Save dataframe as csv
spfeas_stats.to_csv("hog_stats_all.csv")
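The snippet above has lost its preamble (the imports, the shapefile, and the dirs dictionary whose closing brace is still visible at the top). A minimal sketch of what it presumably assumes, modeled on code example #6 below; the directory and raster names here are hypothetical:

import os
import pandas as pd
import geopandas as gpd
from rasterstats import zonal_stats as zs

# Zone polygons (hypothetical path)
shp = gpd.GeoDataFrame.from_file("GN_Divisions.shp")

# Raster base names mapped to the directories that hold them (hypothetical entries)
dirs = {
    'hog_sc5_mean': 'Sri_Lanka_hog_bands/',
    'hog_sc7_mean': 'Sri_Lanka_hog_bands/',
}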
Code example #2
import rasterio as rio
from rasterstats import zonal_stats as zs
import geopandas as gpd
import pandas as pd
import os

from glob import glob
# Read shapefile
shp = gpd.GeoDataFrame.from_file("../zonal_stats/GN_Divisions.shp")

data_dir = "Sri_Lanka_lac_bands"
rasters = glob(os.path.join(data_dir, "*.tif"))


metrics = "sum mean std"
shp_cols = shp[['gid', 'gnd_c', 'gnd_n']]

# Run zonal stats for every band of every raster, join the results onto the
# selected shapefile columns, and save them as CSV
pd_stats = shp_cols.copy()
for path in rasters:
    with rio.open(path) as src:
        transform = src.transform
        array = src.read()
    name = os.path.splitext(os.path.basename(path))[0]
    for band_num, band in enumerate(array, start=1):
        stats = zs(shp, band, stats=metrics, affine=transform,
                   prefix="{}_b{}_".format(name, band_num))
        pd_stats = pd_stats.join(pd.DataFrame(stats))

pd_stats.to_csv('lac_all_stats.csv')
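In the snippet above the rasters are handed to zonal_stats as in-memory numpy arrays rather than file paths, so the georeferencing has to be supplied explicitly. A minimal sketch of the difference, reusing shp from above and assuming a hypothetical raster example.tif:

import rasterio as rio
from rasterstats import zonal_stats as zs

with rio.open("example.tif") as src:   # hypothetical file
    band = src.read(1)
    affine = src.transform

# From a file path, rasterstats reads the transform itself
stats_from_path = zs(shp, "example.tif", stats="mean")

# From a bare array, the transform must be passed via the affine keyword
stats_from_array = zs(shp, band, affine=affine, stats="mean")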

Code example #3
        # Add file name and relative path to dictionary
        rasters[file_name] = tif_relative_path

    # Set output location and file name for the zonal statistics CSV file
    zonal_stats_location = os.path.join(output_directory, "zonal_stats",
                                        "zonalstats_{}.csv".format(feature))

    # Copy metrics
    spfeas_stats = shp_cols.copy()

    # Calculate zonal statistics for each raster and join the results
    for rast, path in rasters.items():

        # Calculate stats; this returns one dict of metric values per zone feature
        stats = zs(shp, path, stats=metrics)

        # New column names using formatter
        new_colnames = ["{}_{}".format(rast, metric) for metric in metrics]

        # Create dataframe from stats dictionary
        df = pd.DataFrame(stats)

        # Rename the metric columns with the new column names
        # zip pairs the elements of the two lists; dict turns the pairs into the
        # {old name: new name} mapping that rename expects
        df2 = df.rename(columns=dict(zip(metrics, new_colnames)))
        spfeas_stats = spfeas_stats.join(df2)

    # Save dataframe as csv
    spfeas_stats.to_csv(zonal_stats_location)
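The rename step commented above is easiest to see in isolation; the raster and metric names below are made up:

metrics = ["sum", "mean", "std"]
new_colnames = ["orb_sc31_mean_sum", "orb_sc31_mean_mean", "orb_sc31_mean_std"]

# zip pairs old and new names element by element; dict turns the pairs into the
# {old name: new name} mapping that DataFrame.rename expects
mapping = dict(zip(metrics, new_colnames))
# {'sum': 'orb_sc31_mean_sum', 'mean': 'orb_sc31_mean_mean', 'std': 'orb_sc31_mean_std'}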
Code example #4
raster7 = "orb_sc51_mean.tif"
raster8 = "orb_sc51_skew.tif"
raster9 = "orb_sc51_kurtosis.tif"
raster10 = "orb_sc51_variance.tif"
raster11 = "orb_sc71_max.tif"
raster12 = "orb_sc71_mean.tif"
raster13 = "orb_sc71_skew.tif"
raster14 = "orb_sc71_kurtosis.tif"
raster15 = "orb_sc71_variance.tif"

# Specify zonal stat metrics
metrics = "sum mean std"

# Run zonal stats and save the values as CSV
stats1 = zs(shp, raster1, stats=metrics, prefix="lac_sc5_")
stats2 = zs(shp, raster2, stats=metrics, prefix="lac_sc7_")
pd_stats = pd.DataFrame(stats1).join(pd.DataFrame(stats2))
stats_final = shp.join(pd_stats)
stats_final.to_csv('lac_stats.csv')
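Numbering raster1 … raster15 and stats1, stats2, … by hand gets unwieldy; the same result can be produced with a loop over the file names. This is a sketch, not the original script, and it assumes the orb rasters sit in the working directory and that shp is already loaded:

import os
from glob import glob
import pandas as pd
from rasterstats import zonal_stats as zs

metrics = "sum mean std"
stats_final = shp.copy()

for path in sorted(glob("orb_sc*.tif")):                # e.g. orb_sc51_mean.tif
    name = os.path.splitext(os.path.basename(path))[0]  # column prefix per raster
    stats = zs(shp, path, stats=metrics, prefix=name + "_")
    stats_final = stats_final.join(pd.DataFrame(stats))

stats_final.to_csv('orb_stats.csv')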


Code example #5
                                                     max_depth.read_masks(1)),
                                 transform=max_depth.transform)
    ],
                                     crs=max_depth.crs)

    # Store original areas for damage calculation
    buildings['original_area'] = buildings.area

    # Buffer buildings
    buildings['geometry'] = buildings.buffer(buffer)

    # Extract maximum depth and vd_product for each building
    buildings['depth'] = [
        row['max'] for row in zs(buildings,
                                 depth,
                                 affine=max_depth.transform,
                                 stats=['max'],
                                 all_touched=True,
                                 nodata=max_depth.nodata)
    ]

    # Keep only buildings where the water depth exceeds the flood threshold
    buildings = buildings[buildings['depth'] > threshold]

    # Calculate depth above floor level
    buildings['depth'] = buildings.depth - threshold

    if len(buildings) == 0:
        with open(os.path.join(outputs_path, 'buildings.csv'), 'w') as f:
            f.write('')
        exit(0)
Code example #6
File: raster_stats.py  Project: adbeda/spfeas-scripts
    'mean_sc7_mean': 'Sri_Lanka_mean_bands/',
    'mean_sc7_variance': 'Sri_Lanka_mean_bands/'
}

# names of statistical variables
metrics = "sum mean std".split()

# Read rasters and join names with paths
rasters = {}
for n, d in dirs.items():
    rasters[n] = os.path.join(d, n + ".tif")

# Specify columns to join from shapefile
shp_cols = shp.drop(['geometry'], axis=1)

# Calculate zonal statistics for each raster and join the results
working_zones = shp_cols.copy()
for raster, path in rasters.items():
    print(raster)
    stats = zs(shp, path, nodata=0, stats=metrics)
    df = pd.DataFrame(stats)
    # Rename the metric columns so each raster gets its own set of columns
    new_colnames = ["{}_{}".format(raster, metric) for metric in metrics]
    df2 = df.rename(columns=dict(zip(metrics, new_colnames)))
    working_zones = working_zones.join(df2)  # append to the working copy

# Save dataframe as csv
working_zones.to_csv("means_stats_all.csv")
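An equivalent way to assemble the table in code example #6 is to collect the per-raster frames and concatenate them once instead of joining inside the loop; a sketch of that variant, reusing the same names:

frames = []
for raster, path in rasters.items():
    stats = zs(shp, path, nodata=0, stats=metrics)
    new_colnames = ["{}_{}".format(raster, metric) for metric in metrics]
    frames.append(pd.DataFrame(stats).rename(columns=dict(zip(metrics, new_colnames))))

# Concatenate along the columns; all frames share the same feature index
working_zones = pd.concat([shp_cols] + frames, axis=1)
working_zones.to_csv("means_stats_all.csv")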
Code example #7
import rasterio as rio
from rasterstats import zonal_stats as zs
import geopandas as gpd
import pandas as pd
import os

# Read shapefile
shp = gpd.GeoDataFrame.from_file("../../zonal_stats/GN_Divisions.shp")

# Read rasters
raster1 = "lac_sc5_lac.tif"
raster2 = "lac_sc7_lac.tif"

# Specify zonal stat metrics
metrics = "sum mean std"

# Run zonal stats and save the values as CSV
stats1 = zs(shp, raster1, stats=metrics, prefix="lac_sc5_")
stats2 = zs(shp, raster2, stats=metrics, prefix="lac_sc7_")
pd_stats = pd.DataFrame(stats1).join(pd.DataFrame(stats2))
stats_final = shp.join(pd_stats)
stats_final.to_csv('lac_stats.csv')
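stats_final above still carries the geometry column, which is written out as long WKT strings in the CSV. If only the attribute table is needed, the geometry can be dropped before saving; a small variation on the last line, not part of the original script:

stats_final.drop(['geometry'], axis=1).to_csv('lac_stats.csv', index=False)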