Example #1

"""
Created on Tue Mar 21 19:39:57 2017

@author: Gretel_MacAir
"""
# %% Import libs

import shapefile
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import pickle

# %% Read shapefile

path = "[PUT YOUR OWN]"
shape_file = "ZillowNeighborhoods-WA.shp"
sf = shapefile.Reader(path + shape_file)

# %% Create list with Seattle shapes

seattle_areas = []
for counter, rec in enumerate(sf.iterRecords()):
    if rec[2] == 'Seattle':
        neighborhood = rec[3]
        seattle_areas.append((counter, neighborhood))

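# %% Hedged sketch: the Point/Polygon imports above suggest a
# point-in-polygon lookup; the function below is illustrative, not part of
# the original example.


def find_neighborhood(lon, lat):
    """Return the name of the Seattle neighborhood containing the point."""
    point = Point(lon, lat)
    for idx, name in seattle_areas:
        polygon = Polygon(sf.shape(idx).points)
        if polygon.contains(point):
            return name
    return None
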
# %% Create coordinate pair list for rectangle slightly larger than Seattle


def create_coordinates_list(lat_min, lat_max, lon_min, lon_max):
    # The original example is truncated here; one plausible body returns the
    # closed ring of corner coordinates:
    return [(lon_min, lat_min), (lon_min, lat_max), (lon_max, lat_max),
            (lon_max, lat_min), (lon_min, lat_min)]
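
Example #2
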
import rasterio
import rasterio.features
import rasterio.warp
import rasterio.drivers
import rasterio.mask
import matplotlib.pyplot as pyplot
import numpy
import numpy.ma as ma
import geojson
import shapefile
import pandas as pd
import SimpleITK as sitk

#%%
# Extract polygons from Voronoi cells and convert to GeoJSON
shape = shapefile.Reader(
    "C:/DataAnalysis/SegmentationOfData/Voronoi/Voronoi.shp")
#first feature of the shapefile
feature = shape.shapeRecords()[0]
# geo interface of the shape
featureGeoInterface = feature.shape.__geo_interface__
geoJsonString = geojson.dumps(featureGeoInterface, sort_keys=True)
geoJsonObject = geojson.loads(geoJsonString)

#%%
# mask and crop image
with rasterio.open(
        'C:/DataAnalysis/SegmentationOfData/20190425MSC_index_ndvi_transformed.tif'
) as src:
    # rasterio.mask.mask returns a (masked image, transform) tuple
    croppedMaskedImageNDVI, croppedTransform = rasterio.mask.mask(
        src, [geoJsonObject], crop=True, nodata=-1000.)
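
#%%
# A hedged sketch of writing the cropped result to a new GeoTIFF, the usual
# follow-up to rasterio.mask.mask (the output path is illustrative):
with rasterio.open(
        'C:/DataAnalysis/SegmentationOfData/20190425MSC_index_ndvi_transformed.tif'
) as src:
    out_meta = src.meta.copy()
out_meta.update(height=croppedMaskedImageNDVI.shape[1],
                width=croppedMaskedImageNDVI.shape[2],
                transform=croppedTransform)
with rasterio.open('cropped_ndvi.tif', 'w', **out_meta) as dst:
    dst.write(croppedMaskedImageNDVI)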
Example #3
import numpy as np
import pandas as pd
import shapefile as shp
import matplotlib.pyplot as plt
import seaborn as sns
import csv

sns.set(style="whitegrid", palette="pastel", color_codes=True)
sns.mpl.rc("figure", figsize=(10,6))

# opening the vector map
shp_path = "D:\\MEGA\\Core CS\\Projects\\TNWaterMap\\resources\\tamilnadu_district.shp"

# reading the shape file by using reader function of the shape lib
sf = shp.Reader(shp_path)

# print(len(sf.shapes()))
# print(sf.records())
# print(sf.records()[1][5])

# with open('tn_districts.csv', mode='w') as f:
#     f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
#     for record in sf.records():
#         f.writerow(record)

# with open('tn_districts.csv', mode='r') as f:
#     f = csv.DictReader(f)
#     line_count = 0
#     for row in f:
#         if line_count == 0:
#             print(f'Column names are {", ".join(row)}')
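
# A hedged sketch of the map-drawing step this script appears to build
# toward (pyshp shapes are drawn edge-by-edge; styling is illustrative):
plt.figure(figsize=(10, 6))
for shape in sf.shapeRecords():
    x = [pt[0] for pt in shape.shape.points]
    y = [pt[1] for pt in shape.shape.points]
    plt.plot(x, y, color='black', linewidth=0.4)
plt.axis('equal')
plt.show()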
Example #4
    plt.fill(*shape.exterior.xy, facecolor = 'khaki', edgecolor = 'black', linewidth = 0.5)
ax.set_facecolor('xkcd:lightblue')
plt.savefig(mapOutFile)
print("Saved map to file: {}".format(mapOutFile))

if not build:
    print("Exiting. Check the saved map to verify polygons")
    print("Re-run with '--build' option to generate the visibility graph.")
    exit(0)

##############################
# Shapes -> Visibility graph #
##############################

## Read shapefile
input_shapefile = shapefile.Reader(shapeOutFile)
shapes = input_shapefile.shapes()

# Load raster
regionData = gdal.Open(regionRasterFile)
regionExtent = getGridExtent(regionData)
minx = regionExtent["minx"]
maxx = regionExtent["maxx"]
miny = regionExtent["miny"]
maxy = regionExtent["maxy"]

polygons = []
for shape in shapes:
    polygon = []
    for point in shape.points:
        x = ((round(point[0], 10) - minx) / (maxx - minx) * rangeWidth)
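        # A plausible continuation of the truncated loop (rangeHeight is
        # assumed to exist alongside rangeWidth):
        y = ((round(point[1], 10) - miny) / (maxy - miny) * rangeHeight)
        polygon.append((x, y))
    polygons.append(polygon)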
Example #5
import random  # needed for random.seed / randint / sample below
import numpy as np  # needed for np.asarray below
import shapefile  # pyshp, needed for shapefile.Reader below
import matplotlib.pyplot as plt  # matplotlib for plotting
import pandas as pd
from ortools.linear_solver import pywraplp  # OR-Tools library

n_stops = 50  #Number of stops
fleet_size = 25
veh_capacity = 12
max_demand = 10
depo = 1

random.seed(2)  #initializing
# first column is x-axis, second: y-axis, third: demand, fourth: dummy variable, no reason to assign this variable.
coords_X = []
attr_X_list = []
reader = shapefile.Reader(
    r"C:\Users\shyam.rampalli\OneDrive - Nanyang Technological University\documents\Conf - Mar 2\Data\AV Stops.shp"
)
fields = [field[0] for field in reader.fields[1:]]
for feature in reader.shapeRecords():
    geom = feature.shape.__geo_interface__
    atr = dict(zip(fields, feature.record))
    attr_X_list.append(atr['name'])
    coords_X.append([
        geom['coordinates'][1], geom['coordinates'][0], atr['name'],
        random.randint(0, max_demand),
        int(0)
    ])
coords_X = random.sample(coords_X, n_stops)
X1 = np.asarray(coords_X)
X1_df = pd.DataFrame(np.asarray(coords_X)[:, :3],
Example #6
import shapefile
import matplotlib.pyplot as plt
import numpy as np

shp = shapefile.Reader('geofiles/nybb_15c/nybb_wgs84.shp')

pos = None
count = 0
for shape in shp.iterShapes():
    points = np.array(shape.points)
    parts = shape.parts
    parts.append(len(shape.points))

    for i in range(len(parts) - 1):
        plt.plot(points.T[0][parts[i]:parts[i + 1]],
                 points.T[1][parts[i]:parts[i + 1]])

plt.savefig('NYC.png')

Example #7

#--load the structure information from the MIKE11 nwk file
file = '..\\MIKE_SHE_Baseline\\Broward_Base_05.nwk11'
structs = mss.load_structure(file)
weirs = mss.load_weirs(file)
culverts = mss.load_culverts(file)

#--load the structure points
file = 'she_structures_culverts_1'
str_shp = shapefile.Reader(shapefile=file)

str_points = str_shp.shapes()
str_header = str_shp.dbfHeader()

#--the dbf attribute index of the str name
str_name_idx = 1
str_br_idx = 0
str_ch_idx = 3

#--load the swrpre polylines
file = 'polylines_active'
ply_shp = shapefile.Reader(shapefile=file)
ply_lines = ply_shp.shapes()
ply_header = ply_shp.dbfHeader()
ply_recs = ply_shp.records()
Example #8
def weatherapi(year="2017"):
    # required variables and paths
    api_key = "af22cf6a216a427fb88685100b43d048"
    pathfolder = "Dataset" + str(year)
    path = os.path.join("Ardidas" + str(year) + "final",
                        "ardidas" + str(year[-2:]) + ".shp")
    sf = shapefile.Reader(path)
    json_folder = check_jsonstats_folder("JsonApis")
    verify_filename = f"LastIndex_WeatherApi {year}.json"
    jverifypath = os.path.join(json_folder, verify_filename)
    json_requests = f"Requests_forKey_ {api_key}.json"
    jrequestspath = os.path.join(json_folder, json_requests)
    timefile = f"Last_time_forKey_ {api_key}.json"
    jtimefilepath = os.path.join(json_folder, timefile)
    jsonstring = f"Last Iteration {year}"
    counter_requests = "Counter"
    lastdate = "Lastdate"
    verify = False
    # check whether the control JSON files exist and extract their info:
    # request count / timestamp when the limit was hit / last index at the stopping point
    my_file_index = Path(jverifypath)
    my_file_requests = Path(jrequestspath)
    my_file_lasttime = Path(jtimefilepath)

    if my_file_requests.is_file():
        with open(jrequestspath, "r") as file:
            data_r = file.read()
        file_dict = json.loads(data_r)
        number_requests = file_dict[counter_requests]
    else:
        number_requests = 0

    if my_file_lasttime.is_file():
        with open(jtimefilepath, "r") as file:
            data_t = file.read()
        file_dict = json.loads(data_t)
        last_date = file_dict[lastdate]
        last_date_obj = datetime.strptime(last_date, "%d/%m/%Y %H:%M:%S")
        delta = datetime.now() - last_date_obj
        delta_in_sec = delta.total_seconds()
        delta_in_hours = divmod(delta_in_sec, 3600)[0]
        if delta_in_hours < 24:
            sys.exit(
                "The waiting period before making new API requests has not elapsed yet!"
            )

    if my_file_index.is_file():
        with open(jverifypath, "r") as file:
            data = file.read()
        file_dict = json.loads(data)
        index = file_dict[jsonstring]
    else:
        index = 0

    api = Api(api_key)

    # loop over the entire dataset
    for i in range(index, len(sf)):
        if my_file_requests.is_file():
            verify = verify_last_request_date(jrequestspath)
            update_verify_jsontime(number_requests, jtimefilepath, verify)

        shape = sf.shapeRecord(i)

        pathjson = os.path.join(pathfolder, str(i))
        filename = "Weather.json"
        filepath = os.path.join(pathjson, filename)

        longitude, latitude = centerpointshape(shape)

        if int(year) == 2017:
            DHInicio = shape.record.DHInicio
            DHFim = shape.record.DHFim

        else:
            DHInicio = shape.record.data_inici
            DHFim = shape.record.data_fim

        # format the dates extracted from the shapefile
        date_format_str = '%Y-%m-%d %H:%M:%S.%f'
        start = datetime.strptime(DHInicio, date_format_str)
        final = datetime.strptime(DHFim, date_format_str)

        # calculations to choose which type of request to make
        delta = final - start
        delta_in_sec = delta.total_seconds()
        delta_in_hours = divmod(delta_in_sec, 3600)[0]
        dias_completos = math.floor(delta_in_hours / 24)
        num_horas_restantes = delta_in_hours - (24 * dias_completos)

        initial_datetime = start + timedelta(hours=-1)
        initial_date = initial_datetime.strftime("%Y-%m-%d:%H")

        # first case
        if delta_in_hours >= 24:
            # the full-day part
            for k in range(1, dias_completos + 1):
                new_end = initial_datetime + timedelta(days=1)
                new_end_date = new_end.strftime("%Y-%m-%d:%H")
                filename = os.path.join(pathjson, f"{k}.json")

                weather_request(api, latitude, longitude, initial_date,
                                new_end_date, filename)
                number_requests += 1

                initial_datetime = new_end
                initial_date = initial_datetime.strftime("%Y-%m-%d:%H")

                updatejsonrequest(number_requests, jrequestspath)
                update_verify_jsontime(number_requests, jtimefilepath, verify)

            # the remaining-hours part
            if num_horas_restantes != 0:
                new_initial = start + timedelta(days=dias_completos, hours=-1)
                new_initial_date = new_initial.strftime("%Y-%m-%d:%H")

                temporal_dif = final - new_initial
                temporal_dif_hours = temporal_dif.total_seconds() / 3600
                if temporal_dif_hours < 24:
                    new_end = initial_datetime + timedelta(
                        hours=math.floor(temporal_dif_hours) + 1)
                else:
                    new_end = initial_datetime + timedelta(
                        hours=math.floor(temporal_dif_hours))

                new_end_date = new_end.strftime("%Y-%m-%d:%H")

                filename = os.path.join(pathjson, f"{dias_completos + 1}.json")
                weather_request(api, latitude, longitude, new_initial_date,
                                new_end_date, filename)
                number_requests += 1

                updatejsonrequest(number_requests, jrequestspath)
                update_verify_jsontime(number_requests, jtimefilepath, verify)

            # merge the JSON files, then delete the temporary ones
            listfiles = []
            for file in os.listdir(pathjson):
                if re.match(r"[0-9]+\.json", file):
                    fpath = os.path.join(pathjson, file)
                    listfiles.append(fpath)

            merge_json(listfiles, pathjson)
            del_files(listfiles)

        # second case (lasting at most 24 hours)
        elif 22 <= delta_in_hours < 24:
            final_plus_1day = start + timedelta(days=1, hours=-1)
            end_date = final_plus_1day.strftime("%Y-%m-%d:%H")

            weather_request(api, latitude, longitude, initial_date, end_date,
                            filepath)
            number_requests += 1

        # third case (the remaining possibility)
        else:
            final_plus_1hour = final + timedelta(hours=1)
            end_date = final_plus_1hour.strftime("%Y-%m-%d:%H")

            weather_request(api, latitude, longitude, initial_date, end_date,
                            filepath)
            number_requests += 1

        # create the dictionaries for the index-control JSON
        json_dict = {jsonstring: i}
        json_dict_exception = {jsonstring: 0}
        """ 
            Escrita de ficheiro json para guardar index onde o programa parou ou reiniciar index se terminou o ciclo.
            Por questão de segurança guarda o penúltimo indice para assegurar que a parte meteorológica do dataset 
        é gerada na sua totalidade e corretamente.
        
        """
        if i < len(sf) - 1:
            with open(jverifypath, 'w') as output:
                json.dump(json_dict, output, indent=4)
        else:
            with open(jverifypath, 'w') as output:
                json.dump(json_dict_exception, output, indent=4)

        updatejsonrequest(number_requests, jrequestspath)
Example #9
# -*- coding: utf-8 -*-
"""
@author: D. Irga B. Naufal Fakhri
"""
import shapefile  # import the shapefile library
sf = shapefile.Reader(
    "Nomor10")  # reads the file by name, without the extension
isiData = sf.records()  # read the records of the shapefile into a variable
print(isiData)  # print the contents of the variable
Example #10
        ## able to use -1 as the end below.
        points = shape.points
        points.append(points[0])
        points = np.array(points)

        ## Plot by individual parts
        for i, j in zip(parts[:-1], parts[1:]):
            axes.plot(points[i:j, 0],
                      points[i:j, 1],
                      c=colors.get(this_level, "0"),
                      lw=max(1.5 - 0.5 * this_level, 0.5))


if __name__ == "__main__":

    ### Example usage of the above function.

    ## Get the shapefile and create a sf object
    shp = "..\\_data\\Shapefile\\Nigeria.shp"
    sf = shapefile.Reader(shp)

    ## Create a dataframe to be plotted on the map
    names = [record[1] for record in sf.records() if record[1].count(":") == 3]
    df = pd.Series({n.lower(): i for n, i in zip(names, range(len(names)))})

    ## Make the plot
    fig, axes = plt.subplots()
    PlotDfonSf(fig, axes, df, sf, colorbar=True, admin_level={2}, alpha=0.45)
    PlotBorders(fig, axes, sf, admin_level={0, 1, 2})
    plt.show()
Example #11
"""


def build_soho_poly(bbox):

    coords = np.array([[bbox[0], bbox[2]], [bbox[0], bbox[3]],
                       [bbox[1], bbox[3]], [bbox[1], bbox[2]],
                       [bbox[0], bbox[2]]])
    sohopoly = sg.Polygon(coords)
    return sohopoly


if __name__ == '__main__':

    # Pull in pump and death locations
    pump_shps = shapefile.Reader("SnowGIS/Pumps.shp")
    death_shps = shapefile.Reader("SnowGIS/Cholera_Deaths.shp")

    # Extract xy locations
    pumps_xy = np.array([s.points[0] for s in pump_shps.shapes()])
    deaths_xy = np.array([d.points[0] for d in death_shps.shapes()])

    # Full color image
    # img = mpimg.imread('OSMap.png')
    # f = open('OSMap.tfw', 'rb')

    # Grayscale image
    img = mpimg.imread('SnowGIS/OSMap_Grayscale.tif')
    f = open('SnowGIS/OSMap_Grayscale.tfw', 'rb')

    # tfw file includes scaling information for the tiff image
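
    # A hedged sketch of the likely next step: an ESRI world file (.tfw)
    # holds six lines (x pixel size, two rotation terms, negative y pixel
    # size, then x and y of the upper-left pixel centre), which yield an
    # extent usable with plt.imshow:
    A, D, B, E, C, F = [float(v) for v in f.read().decode().split()]
    f.close()
    h, w = img.shape[0], img.shape[1]
    extent = (C, C + A * w, F + E * h, F)  # left, right, bottom, top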
Example #12
def get_geometry_from_various(locations_config=[], **query_args):
    """Creates a shapely geometry using given query kwargs arguments

    :param locations_config: EODAG locations configuration
    :type locations_config: list
    :param query_args: query kwargs arguments from core.search() method
    :type query_args: dict
    :returns: shapely geometry found
    :rtype: :class:`shapely.geometry.BaseGeometry`
    :raises: :class:`ValueError`
    """
    geom = None

    if "geometry" in query_args:
        geom_arg = query_args["geometry"]

        bbox_keys = ["lonmin", "latmin", "lonmax", "latmax"]
        if isinstance(geom_arg, dict) and all(k in geom_arg
                                              for k in bbox_keys):
            # bbox dict
            geom = Polygon((
                (geom_arg["lonmin"], geom_arg["latmin"]),
                (geom_arg["lonmin"], geom_arg["latmax"]),
                (geom_arg["lonmax"], geom_arg["latmax"]),
                (geom_arg["lonmax"], geom_arg["latmin"]),
            ))
        elif isinstance(geom_arg, (list, tuple)) and len(geom_arg) >= 4:
            # bbox list
            geom = Polygon((
                (geom_arg[0], geom_arg[1]),
                (geom_arg[0], geom_arg[3]),
                (geom_arg[2], geom_arg[3]),
                (geom_arg[2], geom_arg[1]),
            ))
        elif isinstance(geom_arg, str):
            # WKT geometry
            geom = shapely.wkt.loads(geom_arg)
        elif isinstance(geom_arg, BaseGeometry):
            geom = geom_arg
        elif geom_arg is None:
            pass
        else:
            raise TypeError("Unexpected geometry type: {}".format(
                type(geom_arg)))

    # look for location name in locations configuration
    locations_dict = {loc["name"]: loc for loc in locations_config}
    # The location query kwargs can be either in query_args or in
    # query_args["locations"]; support for these was added in 2.0.0 and
    # 2.1.0 respectively. Locations directly in query_args are kept for
    # backward compatibility; the recommended usage is query_args["locations"],
    # where the location names can be checked against the configuration.
    locations = query_args.get("locations")
    locations = locations if locations is not None else {}
    for location_name in locations:
        if location_name not in locations_dict:
            raise ValueError(f"The location name {location_name} is wrong. "
                             f"It must be one of: {locations_dict.keys()}")
    query_locations = {**query_args, **locations}
    for arg in query_locations.keys():
        if arg in locations_dict.keys():
            found = False
            pattern = query_locations[arg]
            attr = locations_dict[arg]["attr"]
            with shapefile.Reader(locations_dict[arg]["path"]) as shp:
                for shaperec in shp.shapeRecords():
                    if re.search(pattern, shaperec.record[attr]):
                        found = True
                        new_geom = shape(shaperec.shape)
                        # get geoms union
                        geom = new_geom.union(geom) if geom else new_geom
            if not found:
                raise ValueError(
                    f"No match found for the search location '{arg}' "
                    f"with the pattern '{pattern}'.")

    return geom
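
# A hedged usage sketch exercising only the bbox branch of the function above:
if __name__ == "__main__":
    geom = get_geometry_from_various(
        geometry={"lonmin": 1.0, "latmin": 43.0, "lonmax": 2.0, "latmax": 44.0})
    print(geom.bounds)  # (1.0, 43.0, 2.0, 44.0)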
Example #13
import numpy as np  # needed for np.array below
import pykrige.kriging_tools as kt
from pykrige.ok import OrdinaryKriging  # OrdinaryKriging lives in pykrige.ok
from pykrige.uk import UniversalKriging
import pandas as pd

import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.patches as mpatches

import warnings
warnings.filterwarnings("ignore", category=UserWarning)

import shapefile as shp
sf = shp.Reader("L090102_ComuneMilano.shp")

# reading the data
data = pd.read_csv('result.csv', sep=';', decimal='.')
col_list = ["UTM_Est", "Utm_Nord", "Valore"]

rr = data
#leave one out
#non = "Milano v.Feltre"
non = ""
#data = data.loc[data.NomeStazione != non]

# select the columns
data = np.array(data[col_list])

X0, X1 = data[:, 0].min(), data[:, 0].max()
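
# A hedged sketch of the kriging step this example sets up (grid resolution
# and variogram model are illustrative choices):
Y0, Y1 = data[:, 1].min(), data[:, 1].max()
gridx = np.linspace(X0, X1, 100)
gridy = np.linspace(Y0, Y1, 100)
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2],
                     variogram_model='spherical')
z_interp, ss = ok.execute('grid', gridx, gridy)  # kriged values + variances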
Example #14
    def readPointsShape(self, directory):
        # Initialize variables
        num_Zmax = []
        num_Zmin = []
        self.clc = calculosD()
        self.areas = []
        # Counter variables
        cont1 = 0
        cont2 = 0
        # Variables used to drop duplicates
        max_drop = []
        min_drop = []
        # Variables to store the points
        self.pointXmax = []
        self.pointYmax = []
        self.pointZmax = []
        self.pointXmin = []
        self.pointYmin = []
        self.pointZmin = []

        try:
            # Read the data from the file
            sf = shapefile.Reader(directory)
            fields = [x[0] for x in sf.fields][1:]
            records = [y[:] for y in sf.records()]

            # Write into the dataframe and group by Shape_Area
            self.data = pd.DataFrame(columns=fields, data=records)
            columnsneeded = ['X', 'Y', 'Z', 'Shape_Area']

            if all(col in self.data.columns for col in columnsneeded):
                s = self.data[['X', 'Y', 'Z']].groupby(self.data['Shape_Area'])

                # Get the max and min of the selected data
                maxT = pd.DataFrame(s['Z'].max())
                minT = pd.DataFrame(s['Z'].min())

                # Assign the min and max values to lists
                for i in maxT['Z']:
                    num_Zmax.append(i)

                for i in minT['Z']:
                    num_Zmin.append(i)
            else:
                raise KeyError

            # Extract the row corresponding to the max values
            for names, groups in s:
                self.areas.append(names)
                max_values = groups[groups['Z'] == num_Zmax[cont1]]
                # Temporarily store the data of the selected point
                tempMax = max_values.drop_duplicates(subset=None,
                                                     keep="first",
                                                     inplace=False)

                # Check the size to pick the first case
                if len(tempMax) > 1:
                    p = tempMax[tempMax['X'] == tempMax['X'].max()]
                    max_drop.append(p)
                else:
                    max_drop.append(tempMax)
                cont1 = cont1 + 1

            # Extract the row corresponding to the min values
            for names, groups in s:
                min_values = groups[groups['Z'] == num_Zmin[cont2]]
                # Temporarily store the selected point's data and drop duplicates
                tempMin = min_values.drop_duplicates(subset=None,
                                                     keep="first",
                                                     inplace=False)
                # Check the size to pick the correct value
                if len(tempMin) > 1:
                    p = tempMin[tempMin['X'] == tempMin['X'].min()]
                    min_drop.append(p)
                else:
                    min_drop.append(tempMin)
                cont2 = cont2 + 1

            # Replace values to cope with possible inconsistencies in the source format
            for rows in max_drop:
                for x in rows.X:
                    if (isinstance(x, float)):
                        self.pointXmax.append(float(x))
                    else:
                        # Replace the comma with a period
                        xm = str(x).replace(',', '.')
                        self.pointXmax.append(float(xm))

                for y in rows.Y:
                    if (isinstance(y, float)):
                        self.pointYmax.append(float(y))
                    else:
                        # Replace the comma with a period
                        ym = str(y).replace(',', '.')
                        self.pointYmax.append(float(ym))

                for z in rows.Z:
                    if (isinstance(z, float)):
                        self.pointZmax.append(float(z))
                    else:
                        # Replace the comma with a period
                        zm = str(z).replace(',', '.')
                        self.pointZmax.append(float(zm))

            # Same replacement for the minimum values
            for rows in min_drop:
                for x in rows.X:
                    if (isinstance(x, float)):
                        self.pointXmin.append(float(x))
                    else:
                        xm = str(x).replace(',', '.')
                        self.pointXmin.append(float(xm))

                for y in rows.Y:
                    if (isinstance(y, float)):
                        self.pointYmin.append(float(y))
                    else:
                        ym = str(y).replace(',', '.')
                        self.pointYmin.append(float(ym))

                for z in rows.Z:
                    if (isinstance(z, float)):
                        self.pointZmin.append(float(z))
                    else:
                        zm = str(z).replace(',', '.')
                        self.pointZmin.append(float(zm))

            # Send the data off for processing
            self.clc.calcularDistaciaElongacion(self.pointXmax, self.pointXmin,
                                                self.pointYmax, self.pointYmin,
                                                self.pointZmax, self.pointZmin)

        except shapefile.ShapefileException:
            arcpy.AddError("Could not read the specified file")
            arcpy.AddMessage("{0}".format(directory))
        except KeyError:
            arcpy.AddError(
                "The file must contain the following basin coordinates: {0}"
                .format(columnsneeded))
        else:
            return 1
        return 0
Example #15
    def load_world(self, world_file):
        return shapefile.Reader(world_file, encoding='latin1').shapeRecords()
Example #16
    def iterload(self):
        import shapefile
        self.sf = shapefile.Reader(str(self.source))
        self.reloadCols()
        for shaperec in Progress(self.sf.iterShapeRecords(),
                                 total=self.sf.numRecords):
            yield shaperec
Example #17
import psycopg2  # needed for the database connection below
import shapefile
import shapely
from shapely.geometry import Point

# set up the database connection
try:
    conn = psycopg2.connect(
        "dbname='dbbuurt' user='******' host='localhost' password='******'")
    print("Database connection established")
except psycopg2.OperationalError:
    print("Database connection failed")

cur = conn.cursor()

#download list of gm codes
gemeenten = shapefile.Reader("gem_2018")
gemeenteshapes = gemeenten.shapeRecords()

gmcodes = []
for gemeente in gemeenteshapes:
    gmcodes.append(gemeente.record[0])

#download shapefile
wijken = shapefile.Reader("wijk_2018")
wijkshapes = wijken.shapeRecords()

# postgresql comms
stembureaucaller = """SELECT id, bureau, plaats, xc, yc FROM stembureau WHERE gem_code = %s"""
addphrase = """UPDATE stembureau SET wk_code = %s WHERE id = %s"""

#create list of stembureaus
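
# A hedged sketch of the truncated matching step: assign each polling
# station to the wijk polygon containing it, via the SQL templates above
# (the record positions of wk_code and gem_code are assumptions):
for wijk in wijkshapes:
    polygon = shapely.geometry.Polygon(wijk.shape.points)
    wk_code, gm_code = wijk.record[0], wijk.record[1]
    cur.execute(stembureaucaller, (gm_code,))
    for sb_id, bureau, plaats, xc, yc in cur.fetchall():
        if polygon.contains(Point(xc, yc)):
            cur.execute(addphrase, (wk_code, sb_id))
conn.commit()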

Example #18

import shapefile

def trypyshp():
    shpf = shapefile.Reader('/home/ash/Data/tl_2014_39049_roads/tl_2014_39049_roads')
    pass

Example #19

import shapefile
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Polygon
from descartes.patch import PolygonPatch

"""
 IMPORT THE SHAPEFILE 
"""
shp_file_base='american_community_survey_blk_grp_2010_2014'
dat_dir='../shapefiles/'+shp_file_base +'/'
sf = shapefile.Reader(dat_dir+shp_file_base)

print('number of shapes imported:', len(sf.shapes()))


"""
       PLOTTING
"""

"""    Find max/min of record of interest (for scaling the facecolor)"""

# get list of field names, pull out appropriate index
fld = sf.fields[1:]
field_names = [field[0] for field in fld]
print('record field names:', field_names)
fld_name='MED_HH_INC'
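
# A hedged sketch of the scaling step announced above: pull the MED_HH_INC
# value from every record and find its range for the facecolor map:
fld_idx = field_names.index(fld_name)
values = [rec[fld_idx] for rec in sf.records()]
max_val, min_val = max(values), min(values)
print('max/min of', fld_name, ':', max_val, min_val)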
Example #20
1.447155e+03,1.498224e+03,1.551094e+03,1.605828e+03,1.662493e+03,
1.721156e+03,1.781888e+03,1.844761e+03,1.909852e+03,1.977238e+03,
2.047000e+03,2.119223e+03,2.193992e+03,2.271398e+03,2.351534e+03,
2.434496e+03,2.520384e+03,2.609300e+03,2.701352e+03,2.796650e+03,
2.895309e+03,2.997448e+03,3.103188e+03,3.212656e+03,3.325986e+03,
3.443312e+03,3.564775e+03,3.690522e+03,3.820703e+03,3.955475e+03,
4.095000e+03,4.239445e+03,4.388984e+03,4.543796e+03,4.704068e+03,
4.869992e+03,5.041768e+03,5.219600e+03,5.403705e+03,5.594301e+03,
0, 0, 0, 0, 0])

###############
# Graphics
###############

path_shp = Path(current_folder, 'data','swiss_border_shp', 'Border_CH.shp')
BORDER_SHP = shapefile.Reader(str(path_shp))


###############
# Data retrieval
###############
FOLDER_DATABASE = '/store/msrad/radar/radar_database/'
FOLDER_RADAR = '/store/msrad/radar/swiss/data/'
FOLDER_CPCCV = '/store/msrad/radar/cpc_validation/'
COSMO1_START = datetime.datetime(2015,10,1)
FOLDER_COSMO1 = '/store/s83/owm/COSMO-1/'
FOLDER_COSMO1_T = '/store/s83/owm/COSMO-1/ORDERS/MDR/'
FOLDER_COSMO2_T = '/store/msrad/cosmo/cosmo2/data/'
FILTER_COMMAND = '~owm/bin/fxfilter'
CONVERT_COMMAND = '~owm/bin/fxconvert'
Example #21
from os import path
import shapefile  # needed for shapefile.Reader below
from shapely.geometry import Point, Polygon
from numpy import array, zeros_like, where
try:
    from tools.nsha_tools import get_field_data, get_shp_centroid
except ImportError:
    print('Add PYTHONPATH to NSHA18 root directory')

###############################################################################
# parse AUS6 shp exported from MIF
###############################################################################

domshp = 'Domains_Sep2011_edit.shp'

print('Reading source shapefile...')
sf = shapefile.Reader(domshp)
shapes = sf.shapes()
polygons = []
for poly in shapes:
    polygons.append(Polygon(poly.points))

# get src name
src_name = get_field_data(sf, 'CODE', 'str')

###############################################################################
# parse Domains lookup csv
###############################################################################

domcsv = 'Domains_Sep2011_lookup.csv'

dom = []
Example #22
def convert(path, output_name_prefix):
    """
    The main conversion method
    """
    # get projection information
    prj_file = path + '.prj'
    with open(prj_file, 'r') as f:
        prj_text = f.read()
    srs = osr.SpatialReference()
    if srs.ImportFromWkt(prj_text):
        raise ValueError("Error importing PRJ information from: %s" % prj_file)
    in_projection = pyproj.Proj(srs.ExportToProj4())
    # process shapes
    geojson = {'type': 'FeatureCollection', 'features': []}
    # read attribute table records
    sf = shapefile.Reader(path)
    fields = {}
    num = 0
    for field in sf.fields:
        if field[0] == 'DeletionFlag':
            continue
        fields[field[0]] = {
            'name': field[0],
            'type': field[1],
            'sequence_number': num
        }
        num += 1
    # convert shapes
    #print fields.keys()
    for r in sf.shapeRecords():
        shape = r.shape
        record = r.record
        rec_id = None
        if 'NUMMER' in fields:
            rec_id = record[fields['NUMMER']['sequence_number']]
        elif 'OBJECTID' in fields:
            rec_id = record[fields['OBJECTID']['sequence_number']]
        elif 'ID' in fields:
            rec_id = record[fields['ID']['sequence_number']]
        feature = shape.__geo_interface__
        projected_feature = {
            'type': 'Feature',
            'geometry': {
                'coordinates': [],
                'type': 'Polygon'  # hard coded...
            },
            'id': rec_id,
            'properties': {}
        }
        # add properties from records
        for key in fields:
            fieldname = fields[key]['name']
            val = record[fields[key]['sequence_number']]
            if isinstance(val, bytes):
                val = val.decode('latin-1')
            projected_feature['properties'][fieldname] = val
        for ring in feature['coordinates']:
            projected_ring = []
            for c in ring:
                p = pyproj.transform(in_projection, out_projection, c[0], c[1])
                projected_ring.append(p)
            projected_feature['geometry']['coordinates'].append(projected_ring)
        geojson['features'].append(projected_feature)
    write_geojson(geojson, output_name_prefix + '.geojson')
    write_kml(geojson, output_name_prefix, output_name_prefix + '.kml')
Example #23
        nodes[ni][3] = float(vy)
        ni += 1
    except:
        pass

nodes = np.array(nodes)

print("\nCreating initial drop positions...")
if path_shape is None:
    x_min, y_min, vx_min, vy_min = nodes.min(axis=0)
    x_max, y_max, vx_max, vy_max = nodes.max(axis=0)
    drops = np.random.rand(nodes.shape[0], 2)
    drops[:, 0] = x_min + drops[:, 0] * (x_max - x_min)
    drops[:, 1] = y_min + drops[:, 1] * (y_max - y_min)
else:
    sf = shapefile.Reader(path_shape)
    bboxes = []
    for shp_shape in sf.shapes():
        # use the feature's own bounding box (sf.bbox is the whole file's)
        x_min, y_min, x_max, y_max = shp_shape.bbox

        drops = np.random.rand(nodes.shape[0], 2)
        drops[:, 0] = x_min + drops[:, 0] * (x_max - x_min)
        drops[:, 1] = y_min + drops[:, 1] * (y_max - y_min)

        print(" -> ONLY USING FIRST FEATURE")
        break

print("\nBuilding KDTree...")
kdtree = KDTree(nodes[:, (0, 1)])

Example #24
#!/usr/bin/env python3
"""Use pyshp to explore Arlington County, Virginia roads data"""
import shapefile

sf = shapefile.Reader('data/Roads')
sr = sf.shapeRecords()

print("sf = shapefile.Reader('data/Roads'), sr = sf.shapeRecords()")
print('type(sr): {0}'.format(type(sr)))

print('len(sr): {0}'.format(len(sr)))

print('type(sr[0]): {0}'.format(type(sr[0])))

print("Names that don't start with '__' in sr[0]:")
print([name for name in dir(sr[0]) if not name.startswith('__')])

print('type(sr[0].record): {0}'.format(type(sr[0].record)))
print('type(sr[0].shape): {0}'.format(type(sr[0].shape)))

print('sr[0].record: {0}'.format(sr[0].record))

print("Names that don't start with '__' in sr[0].shape:")
print([name for name in dir(sr[0].shape) if not name.startswith('__')])

print('sf.fields: {0}'.format(sf.fields))
Example #25
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 28 17:47:03 2019

@author: FannyShafira
"""

import shapefile  # import the shapefile library
sf = shapefile.Reader(
    "NamaFile")  # reads the file without needing the extension
sf.bbox  # read the bounding box
Example #26
import psycopg2
import shapefile

locale = input("Locale: ")
name = input("Name: ")

fpath = "Resources/{locale}/{name}".format(locale=locale, name=name)

print("Reading file")
sh = shapefile.Reader(fpath)
print("Fetching records...")
records = sh.records()
print("Fetched all records ({})".format(len(records)))

print("Connection Databse..")
conn = psycopg2.connect(
    host="localhost",
    port=5433,
    user="******",
    password="******",
    database="geo"
)
cur = conn.cursor()
print("Succesfully connected to Database with cursor {}".format(id(cur)))

vals = []

print("Making values...")
for rec in records:
    if rec[19]:
        vals.append(
Example #27
    # lat_lon_df = get_lat_lon(sf)
    # margin = 0.01

    # plt.xlim(min(lat_lon_df['latitude'])/2-margin,
    # 	max(lat_lon_df['latitude'])*2+margin)
    # plt.ylim(min(lat_lon_df['longitude'])/2-margin,
    # 	max(lat_lon_df['longitude'])*2+margin)

    limits = get_boundaries(sf)
    print("limits", limits)
    plt.xlim(limits[0] - 39050, limits[1] + 39050)
    plt.ylim(limits[2], limits[3])


sf = shapefile.Reader("taxi_zones/taxi_zones.shp")
'''Record #0: [1, 0.116357453189, 0.0007823067885, 'Newark Airport', 1, 'EWR']
'''
'''
[('DeletionFlag', 'C', 1, 0),
['OBJECTID', 'N', 9, 0], 
['Shape_Leng', 'F', 19, 11], 
['Shape_Area', 'F', 19, 11], 
['zone', 'C', 254, 0], 
['LocationID', 'N', 4, 0], 
['borough', 'C', 254, 0]]
'''
fields_name = [field[0] for field in sf.fields[1:]]
shp_dic = dict(zip(fields_name, list(range(len(fields_name)))))
attributes = sf.records()
shp_attr = [dict(zip(fields_name, attr)) for attr in attributes]
Example #28
import shapefile  # import the shapefile library
sf = shapefile.Reader("Tugas4/Soal1.shp")  # read the shapefile named Soal1 in the Tugas4 folder
sb = sf.bbox  # read its bounding box
print(sb)  # print the contents of variable sb
Example #29
import shapefile

sf = shapefile.Reader("mpa_inventory_2014_public_shp.shp")
Example #30
parser.add_argument(
    '--input_shapes',
    '-is',
    required=True,
    help='The file which contains definitions of the neighborhood shapes')

if __name__ == "__main__":
    args = parser.parse_args()
    logging.info('Using input crime file ' + str(args.input_crime))
    logging.info('Using input geographic shape file ' + str(args.input_shapes))

    outFileName = ""
    if args.output == 'output.csv':
        outFileName = os.path.splitext(
            args.input_crime)[0] + '_w_Zillow_Neighborhoods.csv'
    else:
        outFileName = args.output
    logging.info('Output transform file will be saved to ' + str(outFileName))
    """ Read in the shapes defined in the Zillow Neighborhood shape file """
    r = shapefile.Reader(args.input_shapes)
    shapes = r.shapes()
    encodings = r.records()
    """ Create class instance for GPS to Zillow neighborhood to store shapes and crime data """
    neihgborhoodFinder = gps2zh.GPStoZNeighborhood(
        pd.read_csv(args.input_crime), shapes, encodings)
    """ Call the function which will find the zillow neighbor hood for each crime data """
    inputCrimeWZillowNH = neihgborhoodFinder.add_zillow_neighborhood_column()

    logging.info("Saving output dataframe to " + outFileName)
    inputCrimeWZillowNH.to_csv(outFileName, index=None, header=True)