Esempio n. 1
0
def calculate_green_spaces(division_dir, green_spaces_dir):
    """Calculate the share of green area for each division shape.

    Args:
        division_dir (str): Directory holding the divisions shapefile.
        green_spaces_dir (str or list): Directory (or list of directories)
            holding green-space shapefiles.

    Returns:
        dict: Maps id_division to the fraction of its area that is green.
    """
    division_path = find_shp_path(division_dir)

    # accept a single directory or a list of them
    if not isinstance(green_spaces_dir, list):
        green_spaces_dir = [green_spaces_dir]
    green_spaces_paths = [find_shp_path(i) for i in green_spaces_dir]

    # gather all green space shapes and build one spatial index over them
    green_spaces = []
    for green_spaces_path in green_spaces_paths:
        green_spaces.extend(list(iter_shp_as_shapely(green_spaces_path)))
    green_spaces_idx = create_spatial_index(green_spaces)

    divisions = list(iter_shp_as_shapely(division_path))
    total_divisions = len(divisions)

    green_relevance = {}
    for index, (id_division, division_shp) in enumerate(divisions):
        area = division_shp.area
        # calc_area presumably returns the division area left after
        # subtracting the indexed green spaces -- TODO confirm against
        # its definition
        area_without_green = calc_area(division_shp, green_spaces,
                                       green_spaces_idx)
        area_green = area - area_without_green
        green_relevance[id_division] = area_green / area

        # str.format works on both Python 2 and 3 (unicode() does not);
        # the "\r" padding rewrites the same console line each iteration
        progress_msg = "{} of {}".format(index, total_divisions)
        print(progress_msg, end="\r" * len(progress_msg))

    return green_relevance
Esempio n. 2
0
def create_buffered_shp(directory, distance, buffer_dir=BUFFER_DIR,
                        resolution=16, recalculate=False,
                        context_shp_or_polygon=None):
    """Write a shapefile with buffered versions of the shapes in directory.

    Does nothing when the buffered shapefile already exists, unless
    recalculate is True. The .prj of the source is copied next to the
    output.
    """
    context_polygon = create_context_polygon(context_shp_or_polygon)

    shp_path = pf.find_shp_path(directory)
    shp_name = _create_shp_name(shp_path, distance)
    buffer_shp_path = _create_shp_path(directory, shp_name, buffer_dir)

    # skip the work if the output is already there and no rebuild was asked
    already_built = os.path.isfile(buffer_shp_path + ".shp")
    if already_built and not recalculate:
        return

    # read shapefile with pyshp
    sf_est = shapefile.Reader(shp_path)

    # buffer every shape, restricted to the context polygon when given
    buffer_shapes = [
        create_buffer(shape, distance, resolution, context_polygon)
        for shape in geo_utils.get_shapely_shapes(sf_est)
        if not context_polygon or context_polygon.contains(shape)
    ]

    write_shapefile(sf_est, buffer_shapes, buffer_shp_path)
    utils.copy_prj(shp_path, buffer_shp_path)
Esempio n. 3
0
def read_dbf(shp_dir_or_dbf_path):
    """Load a DBF table as a DataFrame.

    Accepts either a shapefile directory (the .dbf base path is resolved
    inside it) or a direct path to a .dbf file.
    """
    if os.path.isdir(shp_dir_or_dbf_path):
        dbf_path = find_shp_path(shp_dir_or_dbf_path) + ".dbf"
    else:
        dbf_path = shp_dir_or_dbf_path
    return Dbf5(dbf_path).to_dataframe()
Esempio n. 4
0
def create_buffered_shp(directory,
                        distance,
                        buffer_dir=BUFFER_DIR,
                        resolution=16,
                        recalculate=False,
                        context_shp_or_polygon=None):
    """Create a shapefile of buffered shapes from the shapes in directory.

    The output is only (re)built when it doesn't exist yet or when
    recalculate is True; the source .prj is copied alongside it.
    """
    context_polygon = create_context_polygon(context_shp_or_polygon)

    source_path = pf.find_shp_path(directory)
    output_name = _create_shp_name(source_path, distance)
    output_path = _create_shp_path(directory, output_name, buffer_dir)

    if os.path.isfile(output_path + ".shp") and not recalculate:
        return

    # read shapefile with pyshp
    reader = shapefile.Reader(source_path)

    # buffer each shape; when a context polygon is given, only shapes
    # contained in it are processed
    buffered = []
    for original_shape in geo_utils.get_shapely_shapes(reader):
        if context_polygon and not context_polygon.contains(original_shape):
            continue
        buffered.append(create_buffer(original_shape, distance,
                                      resolution, context_polygon))

    write_shapefile(reader, buffered, output_path)
    utils.copy_prj(source_path, output_path)
Esempio n. 5
0
def read_dbf(shp_dir_or_dbf_path):
    """Read a .dbf table into a DataFrame.

    The argument may be a shapefile directory (the .dbf is located via
    find_shp_path) or an explicit .dbf file path.
    """
    is_directory = os.path.isdir(shp_dir_or_dbf_path)
    dbf_path = (find_shp_path(shp_dir_or_dbf_path) + ".dbf"
                if is_directory else shp_dir_or_dbf_path)
    dbf = Dbf5(dbf_path)
    return dbf.to_dataframe()
Esempio n. 6
0
def prj_to_proj4(prj_or_shp_path):
    """Load a CRS object from a .prj file or a shapefile directory.

    Args:
        prj_or_shp_path (str): Path to a .prj file, or a shapefile
            directory whose .prj sits next to the .shp base path.

    Returns:
        The CRS object parsed by pycrs from the .prj file.
    """
    # idiomatic suffix test instead of slicing the last 4 characters
    if prj_or_shp_path.endswith(".prj"):
        prj_path = prj_or_shp_path
    else:
        prj_path = find_shp_path(prj_or_shp_path) + ".prj"

    return pycrs.loader.from_file(prj_path)
Esempio n. 7
0
    def test_find_shp_path(self):
        """find_shp_path resolves a shp directory to its inner base path."""
        expected = os.path.join(
            pf.get_project_dir(),
            "shp/transporte/subte-estaciones/estaciones_de_subte")

        resolved = pf.find_shp_path("shp/transporte/subte-estaciones")

        self.assertEqual(resolved, expected)
Esempio n. 8
0
    def test_find_shp_path(self):
        """The shp base path inside a known shp directory is found."""
        directory = "shp/transporte/subte-estaciones"

        actual_path = pf.find_shp_path(directory)

        expected_path = os.path.join(
            pf.get_project_dir(),
            "shp/transporte/subte-estaciones/estaciones_de_subte")
        self.assertEqual(actual_path, expected_path)
Esempio n. 9
0
def count_points(points_shp_path, polygons_shp_path):
    """Count how many points fall inside each polygon.

    Each point is assigned to at most one polygon: once counted it is
    removed from the candidate set for the remaining polygons.

    Args:
        points_shp_path (str): Directory of the points shapefile.
        polygons_shp_path (str): Directory of the polygons shapefile.

    Returns:
        dict: Maps id_polygon to the number of points it contains.
    """
    points_shp_path = pf.find_shp_path(points_shp_path)
    polygons_shp_path = pf.find_shp_path(polygons_shp_path)

    points = dict(iter_shp_as_shapely(points_shp_path))
    polygons = dict(iter_shp_as_shapely(polygons_shp_path))

    # plain iteration and .items() work on both Python 2 and 3
    # (iterkeys/iteritems are Python 2 only)
    count = {id_polygon: 0 for id_polygon in polygons}
    for id_polygon, polygon in polygons.items():
        # collect ids first: deleting while iterating would break the loop
        contained_ids = [id_point for id_point, point in list(points.items())
                         if polygon.contains(point)]
        count[id_polygon] = len(contained_ids)

        for id_point in contained_ids:
            del points[id_point]

    return count
Esempio n. 10
0
def count_points(points_shp_path, polygons_shp_path):
    """Count how many points fall inside each polygon.

    Each point is assigned to at most one polygon: once counted it is
    removed from the candidate set for the remaining polygons.

    Args:
        points_shp_path (str): Directory of the points shapefile.
        polygons_shp_path (str): Directory of the polygons shapefile.

    Returns:
        dict: Maps id_polygon to the number of points it contains.
    """
    points_shp_path = pf.find_shp_path(points_shp_path)
    polygons_shp_path = pf.find_shp_path(polygons_shp_path)

    points = dict(iter_shp_as_shapely(points_shp_path))
    polygons = dict(iter_shp_as_shapely(polygons_shp_path))

    # plain iteration and .items() work on both Python 2 and 3
    # (iterkeys/iteritems are Python 2 only)
    count = {id_polygon: 0 for id_polygon in polygons}
    for id_polygon, polygon in polygons.items():
        # collect ids first: deleting while iterating would break the loop
        contained_ids = [id_point for id_point, point in list(points.items())
                         if polygon.contains(point)]
        count[id_polygon] = len(contained_ids)

        for id_point in contained_ids:
            del points[id_point]

    return count
Esempio n. 11
0
def create_shp_paths_dict(shps_dir, replacements=None):
    """Create a dict with paths to shps inside shps_dir directory.

    Keys are "<shp_name>-<buffer_tag>", where buffer_tag is the suffix
    after the last "-" in the subdirectory name; shp_name may be renamed
    through the replacements mapping.

    Args:
        shps_dir (str): Directory with one subdirectory per shapefile.
        replacements (dict): Optional map of shp names to replacements.

    Returns:
        dict: Maps "<shp_name>-<buffer_tag>" to the shapefile base path.
    """
    shp_paths_dict = {}
    abs_shp_dirs = [os.path.join(shps_dir, shp_dir)
                    for shp_dir in os.listdir(shps_dir)]
    for shp_dir in abs_shp_dirs:
        dir_name = os.path.basename(shp_dir)
        # skip files and hidden directories: test the directory NAME,
        # not the joined path (its first char never flags hidden dirs)
        if not os.path.isdir(shp_dir) or dir_name.startswith("."):
            continue

        # take the buffer tag from the directory name, not the full path
        # (a "-" in a parent directory would corrupt the tag)
        buffer_tag = dir_name.split("-")[-1]

        shp_name = dir_name.replace("-" + buffer_tag, "")
        if replacements and shp_name in replacements:
            shp_name = replacements[shp_name]

        # shp_dir already includes shps_dir; joining them again would
        # duplicate the prefix when shps_dir is a relative path
        shp_path = pf.find_shp_path(shp_dir)
        shp_paths_dict[shp_name + "-" + buffer_tag] = shp_path

    return shp_paths_dict
Esempio n. 12
0
def create_shp_paths_dict(shps_dir, replacements=None):
    """Create a dict with paths to shps inside shps_dir directory.

    Keys are "<shp_name>-<buffer_tag>", where buffer_tag is the suffix
    after the last "-" in the subdirectory name; shp_name may be renamed
    through the replacements mapping.

    Args:
        shps_dir (str): Directory with one subdirectory per shapefile.
        replacements (dict): Optional map of shp names to replacements.

    Returns:
        dict: Maps "<shp_name>-<buffer_tag>" to the shapefile base path.
    """
    shp_paths_dict = {}
    abs_shp_dirs = [os.path.join(shps_dir, shp_dir)
                    for shp_dir in os.listdir(shps_dir)]
    for shp_dir in abs_shp_dirs:
        dir_name = os.path.basename(shp_dir)
        # skip files and hidden directories: test the directory NAME,
        # not the joined path (its first char never flags hidden dirs)
        if not os.path.isdir(shp_dir) or dir_name.startswith("."):
            continue

        # take the buffer tag from the directory name, not the full path
        # (a "-" in a parent directory would corrupt the tag)
        buffer_tag = dir_name.split("-")[-1]

        shp_name = dir_name.replace("-" + buffer_tag, "")
        if replacements and shp_name in replacements:
            shp_name = replacements[shp_name]

        # shp_dir already includes shps_dir; joining them again would
        # duplicate the prefix when shps_dir is a relative path
        shp_path = pf.find_shp_path(shp_dir)
        shp_paths_dict[shp_name + "-" + buffer_tag] = shp_path

    return shp_paths_dict
def create_shp_paths_dict(shps_dir, replacements=None):
    """Create a dict with paths to shps inside shps_dir directory.

    Keys are taken from the second "_"-separated token of each
    subdirectory name, optionally renamed through replacements.

    Args:
        shps_dir (str): Directory with one subdirectory per shapefile.
        replacements (dict): Optional map of shp names to replacements.

    Returns:
        dict: Maps shp name to the shapefile base path.
    """
    shp_paths_dict = {}
    abs_shp_dirs = [os.path.join(shps_dir, shp_dir)
                    for shp_dir in os.listdir(shps_dir)]

    for shp_dir in abs_shp_dirs:
        dir_name = os.path.basename(shp_dir)
        # skip files and hidden directories: test the directory NAME,
        # not the joined path (its first char never flags hidden dirs)
        if not os.path.isdir(shp_dir) or dir_name.startswith("."):
            continue

        # assumes names look like "<prefix>_<shp_name>..."; raises
        # IndexError otherwise -- TODO confirm the naming convention
        shp_name = dir_name.split("_")[1]
        if replacements and shp_name in replacements:
            shp_name = replacements[shp_name]

        shp_path = find_shp_path(shp_dir)
        shp_paths_dict[shp_name] = shp_path

    return shp_paths_dict
Esempio n. 14
0
import pandas as pd
from shapely.geometry import LineString, Point, Polygon
from shapely.ops import cascaded_union
from create_buffers import write_shapefile
# from calculate_weights import find_intersections
from path_finders import get_division_path, find_shp_path, get_indicators_path
from create_indicators import get_or_create_indicators_df
import time
import shapefile
from path_finders import get_transport_shp_path, get_division_path
from geo_utils import iter_shp_as_shapely, get_shapely_shapes
from shapely.geometry import LineString, Point, Polygon, MultiPolygon, MultiPoint

from global_vars import IDS_GCBA, AREA_LEVEL_SHP_NAME

# base path of the census-radios indicators shapefile, resolved once at
# import time via find_shp_path
SHP_INDIC_RADIO = find_shp_path(os.path.join("indicadores",
                                             "radios_censo_2010"))


# AUXILIARY
def find_intersections(shape, shapes):
    """Yield every element of shapes that intersects shape."""
    # buffer(0) corrects topological errors of self-intersection
    fixed_shape = shape.buffer(0)

    for candidate in shapes:
        if fixed_shape.intersects(candidate):
            yield candidate


def create_spatial_index(points, tolerance):
    """Create and rtree optimized spatial index from points.
Esempio n. 15
0
def recalculate_indicators(new_shp_dir,
                           area_level,
                           skip=None,
                           subcategory=None,
                           omit_fields=None,
                           by_area=None):
    """Recalculate area-level indicators for a new shapefile division.

    Reads the shapes in new_shp_dir, redistributes the indicators of
    area_level into each new shape using precalculated weights, adds
    area (km2) and population density fields, and writes the result as a
    new shapefile (copying the .prj as well).

    Args:
        new_shp_dir (str): Directory with the new division shapefile.
        area_level (str): Area level whose indicators are redistributed.
        skip (list): Indicators to skip.
        subcategory (str): Subcategory used to build the output path.
        omit_fields (str or list): Extra fields to omit besides OMIT_FIELDS.
        by_area (list): Indicators weighted by area.
    """
    skip = skip or []
    by_area = by_area or []

    # some fields are omitted always: merge OMIT_FIELDS with the caller's
    # fields in a new list (the original code overwrote the caller's list
    # with OMIT_FIELDS and then extended the module constant with itself)
    if omit_fields:
        if not isinstance(omit_fields, list):
            omit_fields = [omit_fields]
        omit_fields = list(OMIT_FIELDS) + omit_fields

    new_shp_path = find_shp_path(new_shp_dir)
    shp_name = os.path.basename(new_shp_path)

    sf = shapefile.Reader(new_shp_path)
    df_indicators = get_indicators(area_level)
    weights = get_weights(new_shp_path, area_level)

    w = shapefile.Writer(shapefile.POLYGON)

    # copy the original fields, then add one numeric field per indicator
    indicators = _get_indicator_names(df_indicators)
    for field in sf.fields[1:]:
        w.field(*field)
    for indicator in indicators:
        field = [str(indicator), str("N"), 20, 18]
        w.field(*field)
    w.field(str("area_km2"), str("N"), 20, 18)
    w.field(str("hab_km2"), str("N"), 20, 18)

    for record_shape in sf.iterShapeRecords():
        record = record_shape.record
        shape = record_shape.shape

        # normalize the record id to unicode for the weights lookup
        if isinstance(record[0], int):
            id_record = unicode(record[0])
        else:
            id_record = unicode(record[0].decode("utf-8"))

        if len(weights[id_record]) > 0:
            calculated_indicators = _calc_indicators(indicators, df_indicators,
                                                     weights[id_record],
                                                     area_level, skip, by_area)
            record.extend(calculated_indicators)

            # m2 to km2
            area = calculate_area(shape) / 1000000
            record.append(area)

            population = calculated_indicators[indicators.index(POPULATION)]
            pop_density = population / area
            record.append(pop_density)

            w.record(*record)

            w.poly(shapeType=shapefile.POLYGON, parts=[shape.points])

    path = get_indicators_shp_path(shp_name, subcategory)
    w.save(path)

    utils.copy_prj(new_shp_path.decode("utf-8"), path)
Esempio n. 16
0
def calculate_intersect_weights(division_dir,
                                buffer_dir,
                                weights_path,
                                empty_dirs=None,
                                force_buffer_sum=True):
    """Calculate percentage of division_dir shapes intersecting buffer_dir.

    Find which shapes in the division shapefile intersect with each shape in
    buffer, and how much surface of the division shape is intersecting as well
    as how much of the buffer shape is being intersected by that division
    shape.

    If a list with "empty shapefiles" is provided, the surfaces from
    those shps that intersect with the buffer will not be taken into account
    for the calculation of weights. They will be subtracted both from divisions
    and buffers.

    Args:
        division_dir (str): Directory where shapefile with "inner shapes" is
            in this case the polygons used to divide the greater shape.
        buffer_dir (str): Directory where shapefile with "container shapes" is,
            in this case the buffers calculated over points or lines.
        weights_path (str): Json path where calculated weights will be saved.
        empty_dirs (list): Paths to shapefiles with surfaces that shouldn't be
            taken into account in the calculation.
        force_buffer_sum (bool): Recalculate weights of a buffer so they always
            sum 1.0.

    Returns:
        dict: The percentage of intersected surface is returned as calculated
            over total division area and total buffer area in a dictionary like
            this dict[id_buffer][id_division][division]

        >>> {
            "id_buffer1":
                {"id_division1": {"division": %_intersect_division_surface,
                                  "buffer": %_intersect_buffer_surface},
                 "id_division2": {"division": %_intersect_division_surface,
                                  "buffer": %_intersect_buffer_surface}
                },
            }
    """
    division_path = find_shp_path(division_dir)
    buffer_path = find_shp_path(buffer_dir)
    if empty_dirs:
        empty_paths = [find_shp_path(shp) for shp in empty_dirs]

    # create spatial index for divisions
    divisions = list(geo_utils.iter_shp_as_shapely(division_path))
    divisions_idx = create_spatial_index(divisions)

    # create spatial index for empty shapes
    if empty_dirs:
        empty_shps = []
        for empty_path in empty_paths:
            empty_shps.extend(list(geo_utils.iter_shp_as_shapely(empty_path)))

        empty_idx = create_spatial_index(empty_shps)

    weighted_intersections = {}
    for id_buffer, buffer_shp in geo_utils.iter_shp_as_shapely(buffer_path):
        # buffer(0) corrects topological errors of self-intersection
        buffer_shp = buffer_shp.buffer(0)
        weighted_intersections[id_buffer] = {}

        if empty_dirs:
            buffer_area = calc_area(buffer_shp, empty_shps, empty_idx)
        else:
            buffer_area = buffer_shp.area
        assert buffer_area > 0, "Buffer area can't be 0 " + unicode(id_buffer)

        intersect_generator = find_intersections(buffer_shp, divisions,
                                                 divisions_idx)

        for id_division, division_shp in intersect_generator:
            division_shp = division_shp.buffer(0)

            if empty_dirs:
                division_area = calc_area(division_shp, empty_shps, empty_idx)
            else:
                division_area = division_shp.area

            try:
                intersect = buffer_shp.intersection(division_shp)
                if empty_dirs:
                    intersect_area = calc_area(intersect, empty_shps,
                                               empty_idx)
                else:
                    intersect_area = intersect.area
            except Exception as inst:
                print("id_divison:", id_division,
                      "couldn't be intersected with id_buffer:", id_buffer)
                print(inst, "\n")
                # skip this division: without the continue, intersect_area
                # would be unbound (or stale from a previous division) in
                # the weights computed below
                continue

            weighted_intersections[id_buffer][id_division] = {
                "division": round(intersect_area / division_area, 30),
                "buffer": round(intersect_area / buffer_area, 30)
            }

        if force_buffer_sum:
            # .values() works on both Python 2 and 3 (itervalues does not)
            total_w = sum(i["buffer"]
                          for i in weighted_intersections[id_buffer].values())
            for id_division in weighted_intersections[id_buffer]:
                weighted_intersections[id_buffer][id_division][
                    "buffer"] /= total_w

    save_to_json(weighted_intersections, weights_path)
    return weighted_intersections
Esempio n. 17
0
def calculate_intersect_weights(division_dir, buffer_dir, weights_path,
                                empty_dirs=None, force_buffer_sum=True):
    """Calculate percentage of division_dir shapes intersecting buffer_dir.

    Find which shapes in the division shapefile intersect with each shape in
    buffer, and how much surface of the division shape is intersecting as well
    as how much of the buffer shape is being intersected by that division
    shape.

    If a list with "empty shapefiles" is provided, the surfaces from
    those shps that intersect with the buffer will not be taken into account
    for the calculation of weights. They will be subtracted both from divisions
    and buffers.

    Args:
        division_dir (str): Directory where shapefile with "inner shapes" is
            in this case the polygons used to divide the greater shape.
        buffer_dir (str): Directory where shapefile with "container shapes" is,
            in this case the buffers calculated over points or lines.
        weights_path (str): Json path where calculated weights will be saved.
        empty_dirs (list): Paths to shapefiles with surfaces that shouldn't be
            taken into account in the calculation.
        force_buffer_sum (bool): Recalculate weights of a buffer so they always
            sum 1.0.

    Returns:
        dict: The percentage of intersected surface is returned as calculated
            over total division area and total buffer area in a dictionary like
            this dict[id_buffer][id_division][division]

        >>> {
            "id_buffer1":
                {"id_division1": {"division": %_intersect_division_surface,
                                  "buffer": %_intersect_buffer_surface},
                 "id_division2": {"division": %_intersect_division_surface,
                                  "buffer": %_intersect_buffer_surface}
                },
            }
    """
    division_path = pf.find_shp_path(division_dir)
    buffer_path = pf.find_shp_path(buffer_dir)
    if empty_dirs:
        empty_paths = [pf.find_shp_path(shp) for shp in empty_dirs]

    # create spatial index for divisions
    divisions = list(geo_utils.iter_shp_as_shapely(division_path))
    divisions_idx = create_spatial_index(divisions)

    # create spatial index for empty shapes
    if empty_dirs:
        empty_shps = []
        for empty_path in empty_paths:
            empty_shps.extend(list(geo_utils.iter_shp_as_shapely(empty_path)))

        empty_idx = create_spatial_index(empty_shps)

    weighted_intersections = {}
    for id_buffer, buffer_shp in geo_utils.iter_shp_as_shapely(buffer_path):
        # buffer(0) corrects topological errors of self-intersection
        buffer_shp = buffer_shp.buffer(0)
        weighted_intersections[id_buffer] = {}

        if empty_dirs:
            buffer_area = calc_area(buffer_shp, empty_shps, empty_idx)
        else:
            buffer_area = buffer_shp.area
        assert buffer_area > 0, "Buffer area can't be 0 " + unicode(id_buffer)

        intersect_generator = find_intersections(buffer_shp, divisions,
                                                 divisions_idx)

        for id_division, division_shp in intersect_generator:
            division_shp = division_shp.buffer(0)

            if empty_dirs:
                division_area = calc_area(division_shp, empty_shps, empty_idx)
            else:
                division_area = division_shp.area

            try:
                intersect = buffer_shp.intersection(division_shp)
                if empty_dirs:
                    intersect_area = calc_area(intersect, empty_shps,
                                               empty_idx)
                else:
                    intersect_area = intersect.area
            except Exception as inst:
                print("id_divison:", id_division,
                      "couldn't be intersected with id_buffer:", id_buffer)
                print(inst, "\n")
                # skip this division: without the continue, intersect_area
                # would be unbound (or stale from a previous division) in
                # the weights computed below
                continue

            weighted_intersections[id_buffer][id_division] = {
                "division": round(intersect_area / division_area, 30),
                "buffer": round(intersect_area / buffer_area, 30),
            }

        if force_buffer_sum:
            # .values() works on both Python 2 and 3 (itervalues does not)
            total_w = sum(i["buffer"]
                          for i in weighted_intersections[id_buffer].values())
            for id_division in weighted_intersections[id_buffer]:
                weighted_intersections[id_buffer][id_division][
                    "buffer"] /= total_w

    save_to_json(weighted_intersections, weights_path)
    return weighted_intersections
Esempio n. 18
0
def recalculate_indicators(new_shp_dir, area_level, skip=None,
                           subcategory=None, omit_fields=None, by_area=None):
    """Recalculate area-level indicators for a new shapefile division.

    Reads the shapes in new_shp_dir, redistributes the indicators of
    area_level into each new shape using precalculated weights, adds
    area (km2) and population density fields, and writes the result as a
    new shapefile (copying the .prj as well).

    Args:
        new_shp_dir (str): Directory with the new division shapefile.
        area_level (str): Area level whose indicators are redistributed.
        skip (list): Indicators to skip.
        subcategory (str): Subcategory used to build the output path.
        omit_fields (str or list): Extra fields to omit besides OMIT_FIELDS.
        by_area (list): Indicators weighted by area.
    """
    skip = skip or []
    by_area = by_area or []

    # some fields are omitted always: merge OMIT_FIELDS with the caller's
    # fields in a new list (the original code overwrote the caller's list
    # with OMIT_FIELDS and then extended the module constant with itself)
    if omit_fields:
        if not isinstance(omit_fields, list):
            omit_fields = [omit_fields]
        omit_fields = list(OMIT_FIELDS) + omit_fields

    new_shp_path = find_shp_path(new_shp_dir)
    shp_name = os.path.basename(new_shp_path)

    sf = shapefile.Reader(new_shp_path)
    df_indicators = get_indicators(area_level)
    weights = get_weights(new_shp_path, area_level)

    w = shapefile.Writer(shapefile.POLYGON)

    # copy the original fields, then add one numeric field per indicator
    indicators = _get_indicator_names(df_indicators)
    for field in sf.fields[1:]:
        w.field(*field)
    for indicator in indicators:
        field = [str(indicator), str("N"), 20, 18]
        w.field(*field)
    w.field(str("area_km2"), str("N"), 20, 18)
    w.field(str("hab_km2"), str("N"), 20, 18)

    for record_shape in sf.iterShapeRecords():
        record = record_shape.record
        shape = record_shape.shape

        # normalize the record id to unicode for the weights lookup
        if isinstance(record[0], int):
            id_record = unicode(record[0])
        else:
            id_record = unicode(record[0].decode("utf-8"))

        if len(weights[id_record]) > 0:
            calculated_indicators = _calc_indicators(indicators,
                                                     df_indicators,
                                                     weights[id_record],
                                                     area_level,
                                                     skip, by_area)
            record.extend(calculated_indicators)

            # m2 to km2
            area = calculate_area(shape) / 1000000
            record.append(area)

            population = calculated_indicators[indicators.index(POPULATION)]
            pop_density = population / area
            record.append(pop_density)

            w.record(*record)

            w.poly(shapeType=shapefile.POLYGON, parts=[shape.points])

    path = get_indicators_shp_path(shp_name, subcategory)
    w.save(path)

    utils.copy_prj(new_shp_path.decode("utf-8"), path)
Esempio n. 19
0
import pandas as pd
from shapely.geometry import LineString, Point, Polygon
from shapely.ops import cascaded_union
from create_buffers import write_shapefile
# from calculate_weights import find_intersections
from path_finders import get_division_path, find_shp_path, get_indicators_path
from create_indicators import get_or_create_indicators_df
import time
import shapefile
from path_finders import get_transport_shp_path, get_division_path
from geo_utils import iter_shp_as_shapely, get_shapely_shapes
from shapely.geometry import LineString, Point, Polygon, MultiPolygon, MultiPoint

from global_vars import IDS_GCBA, AREA_LEVEL_SHP_NAME

# base path of the census-radios indicators shapefile, resolved once at
# import time via find_shp_path
SHP_INDIC_RADIO = find_shp_path(
    os.path.join("indicadores", "radios_censo_2010"))


# AUXILIARY
def find_intersections(shape, shapes):
    """Yield each member of shapes intersecting the given shape."""
    # buffer(0) corrects topological errors of self-intersection
    cleaned = shape.buffer(0)

    for other in shapes:
        if cleaned.intersects(other):
            yield other


def create_spatial_index(points, tolerance):
    """Create and rtree optimized spatial index from points.