Example #1
def points_to_facility(netDataset,
                       rdv_name,
                       points,
                       facilities,
                       outTable,
                       oneway=None,
                       save_result_input=None):
    """
    Execute Closest Facility and save the result in the points table
    """

    import arcpy

    from gasp.cpu.arcg.mng.fld import add_field
    from gasp.cpu.arcg.mng.fld import calc_fld
    from gasp.cpu.arcg.mng.joins import join_table
    # NOTE: closest_facility is called below but was not imported;
    # this import path is an assumption
    from gasp.mob.arctbx.closest import closest_facility

    arcpy.env.overwriteOutput = True

    closest_facility(netDataset,
                     rdv_name,
                     facilities,
                     points,
                     outTable,
                     oneway_restriction=oneway)

    if save_result_input:
        add_field(outTable, 'j', "SHORT", "6")
        calc_fld(outTable, 'j', "[IncidentID]-1")
        join_table(points, "FID", outTable, "j", "Total_Minu")
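
A minimal usage sketch for points_to_facility; every path, dataset name, and
field name below is an illustrative assumption, not taken from the source:

# Hypothetical call: travel time from incident points to the nearest
# hospital, written back into the points table
points_to_facility(
    r'C:\data\net.gdb\roads\roads_ND',  # netDataset (assumed path)
    'roads',                            # rdv_name - streets feature class
    r'C:\data\incidents.shp',           # points (origins)
    r'C:\data\hospitals.shp',           # facilities (destinations)
    r'C:\data\cls_result.dbf',          # outTable
    oneway='Oneway',                    # optional oneway restriction
    save_result_input=True              # join Total_Minu back to the points
)
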
Example #2
def polygons_to_facility(netdataset,
                         polygons,
                         facilities,
                         outTbl,
                         oneway=None,
                         rdv=None,
                         junctions=None,
                         save_result_input=None):
    """
    Execute the Closest Facility tool after calculation of polygons
    centroids
    """

    import arcpy
    import os

    from gasp.cpu.arcg.lyr import feat_lyr
    from gasp.cpu.arcg.mng.feat import feat_to_pnt
    from gasp.cpu.arcg.mng.fld import add_field
    from gasp.cpu.arcg.mng.fld import calc_fld
    from gasp.cpu.arcg.mng.joins import join_table
    # NOTE: import path for closest_facility is an assumption
    from gasp.mob.arctbx.closest import closest_facility

    arcpy.env.overwriteOutput = True

    # Polygons to Points
    polLyr = feat_lyr(polygons)
    pntShp = os.path.join(
        os.path.dirname(polygons),
        os.path.splitext(os.path.basename(polygons))[0] + '_pnt.shp')
    pntShp = feat_to_pnt(polLyr, pntShp, pnt_position='INSIDE')

    closest_facility(netdataset,
                     facilities,
                     pntShp,
                     outTbl,
                     oneway_restriction=oneway,
                     rdv=rdv,
                     junc=junctions)

    field_output = 'dst' + os.path.splitext(os.path.basename(facilities))[0]
    add_field(outTbl, field_output[:10], "FLOAT", "10", "3")
    calc_fld(outTbl, field_output[:10], "[Total_Minu]")

    if save_result_input:
        add_field(outTbl, 'j', "SHORT", "6")
        calc_fld(outTbl, 'j', "[IncidentID]-1")
        join_table(polLyr, "FID", outTbl, "j", field_output[:10])
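
A possible invocation of polygons_to_facility; all paths and names are
illustrative assumptions:

# Hypothetical call: distance from census-tract centroids to the nearest
# school, with the result joined back to the polygons
polygons_to_facility(
    r'C:\data\net.gdb\roads\roads_ND',  # netdataset (assumed path)
    r'C:\data\census_tracts.shp',       # polygons (centroids are derived)
    r'C:\data\schools.shp',             # facilities
    r'C:\data\dst_schools.dbf',         # outTbl
    oneway='Oneway',
    rdv='roads',                        # streets feature class
    junctions='roads_ND_Junctions',     # junctions feature class (assumed)
    save_result_input=True
)
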
Example #3
def arcg_mean_time_WByPop(netDt,
                          rdv,
                          infraestruturas,
                          unidades,
                          conjuntos,
                          popf,
                          w,
                          output,
                          oneway=None):
    """
    Tempo medio ponderado pela populacao residente a infra-estrutura mais
    proxima (min)
    
    * netDt = Path to Network Dataset
    * infraestruturas = Points of destiny
    * unidades = BGRI; Freg; Concelhos
    * conjuntos = Freg; Concelhos; NUT - field
    * popf = Field with the population of the statistic unity
    * w = Workspace
    * output = Path to store the final output
    * rdv = Name of feature class with the streets network
    """

    import arcpy
    import os
    from gasp.cpu.arcg.lyr import feat_lyr
    from gasp.cpu.arcg.mng.feat import feat_to_pnt
    from gasp.cpu.arcg.mng.fld import add_field
    from gasp.cpu.arcg.mng.fld import calc_fld
    from gasp.cpu.arcg.mng.joins import join_table
    from gasp.mng.genze import dissolve
    from gasp.mng.gen import copy_feat
    from gasp.mob.arctbx.closest import closest_facility

    def get_freg_denominator(shp, groups, population, fld_time="Total_Minu"):
        cursor = arcpy.SearchCursor(shp)

        groups_sum = {}
        for lnh in cursor:
            group = lnh.getValue(groups)
            nrInd = float(lnh.getValue(population))
            time = float(lnh.getValue(fld_time))

            if group not in groups_sum.keys():
                groups_sum[group] = time * nrInd

            else:
                groups_sum[group] += time * nrInd

        del cursor, lnh

        return groups_sum

    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = w

    # Start Procedure #
    # Create a copy of the statistical units to preserve the original data
    copy_unities = copy_feat(unidades,
                             os.path.join(w, os.path.basename(unidades)),
                             gisApi='arcpy')

    # Generate centroids of the statistical units (unidades)
    lyr_unidades = feat_lyr(copy_unities)
    pnt_unidades = feat_to_pnt(lyr_unidades, 'pnt_unidades.shp')

    # Network Processing - Distance between CENTROIDS and destination points
    closest_facility(netDt,
                     rdv,
                     infraestruturas,
                     pnt_unidades,
                     os.path.join(w, "cls_table.dbf"),
                     oneway_restriction=oneway)
    add_field("cls_table.dbf", 'j', "SHORT", "6")
    calc_fld("cls_table.dbf", 'j', "[IncidentID]-1")
    join_table(lyr_unidades, "FID", "cls_table.dbf", "j", "Total_Minu")

    # Compute the weighted sums by parish (group)
    groups = get_freg_denominator(lyr_unidades, conjuntos, popf)
    add_field(lyr_unidades, "tm", "FLOAT", "10", "3")

    cs = arcpy.UpdateCursor(lyr_unidades)
    for linha in cs:
        group = linha.getValue(conjuntos)
        t = float(linha.getValue("Total_Minu"))
        p = int(linha.getValue(popf))
        total = groups[group]
        indi = ((t * p) / total) * t
        linha.setValue("tm", indi)
        cs.updateRow(linha)
    del cs

    return dissolve(lyr_unidades,
                    output,
                    conjuntos,
                    statistics="tm SUM",
                    api="arcpy")
Example #4
def population_within_point_buffer(netDataset,
                                   rdvName,
                                   pointShp,
                                   populationShp,
                                   popField,
                                   bufferDist,
                                   epsg,
                                   output,
                                   workspace=None,
                                   bufferIsTimeMinutes=None,
                                   useOneway=None):
    """
    Assign to points the population within a certain distance (metric or time)
    
    * Creates a Service Area Polygon for each point in pointShp;
    * Intersect the Service Area Polygons with the populationShp;
    * Count the number of persons within each Service Area Polygon
    (this number will be weighted by the area % of the statistic unit
    intersected with the Service Area Polygon).
    """

    import arcpy
    import os
    from geopandas import GeoDataFrame
    from gasp.cpu.arcg.lyr import feat_lyr
    from gasp.cpu.arcg.anls.ovlay import intersect
    from gasp.mng.gen import copy_feat
    from gasp.cpu.arcg.mng.fld import add_geom_attr
    from gasp.cpu.arcg.mng.fld import add_field
    from gasp.cpu.arcg.mng.fld import calc_fld
    from gasp.mng.genze import dissolve
    from gasp.mob.arctbx.svarea import service_area_use_meters
    from gasp.mob.arctbx.svarea import service_area_polygon
    from gasp.fm import tbl_to_obj
    from gasp.to.shp import df_to_shp

    workspace = os.path.dirname(pointShp) if not workspace else workspace

    if not os.path.exists(workspace):
        from gasp.oss.ops import create_folder
        workspace = create_folder(workspace, overwrite=False)

    # Copy population layer
    populationShp = copy_feat(
        populationShp,
        os.path.join(workspace,
                     'cop_{}'.format(os.path.basename(populationShp))),
        gisApi='arcpy')

    # Create layer
    pntLyr = feat_lyr(pointShp)
    popLyr = feat_lyr(populationShp)

    # Create Service Area
    if not bufferIsTimeMinutes:
        servArea = service_area_use_meters(
            netDataset,
            rdvName,
            bufferDist,
            pointShp,
            os.path.join(workspace,
                         'servare_{}'.format(os.path.basename(pointShp))),
            OVERLAP=False,
            ONEWAY=useOneway)

    else:
        servArea = service_area_polygon(
            netDataset,
            rdvName,
            bufferDist,
            pointShp,
            os.path.join(workspace,
                         "servare_{}".format(os.path.basename(pointShp))),
            ONEWAY_RESTRICTION=useOneway,
            OVERLAP=None)

    servAreaLyr = feat_lyr(servArea)

    # Add Column with Polygons area to Feature Class population
    add_geom_attr(popLyr, "total", geom_attr="AREA")

    # Intersect buffer and Population Feature Class
    intSrc = intersect([servAreaLyr, popLyr],
                       os.path.join(workspace, "int_servarea_pop.shp"))

    intLyr = feat_lyr(intSrc)

    # Get the area of each piece of the statistical units intersected
    # with the service area
    add_geom_attr(intLyr, "partarea", geom_attr="AREA")

    # Get population weighted by area intersected
    calc_fld(intLyr, "population",
             "((([partarea] * 100) / [total]) * [{}]) / 100".format(popField),
             {
                 "TYPE": "DOUBLE",
                 "LENGTH": "10",
                 "PRECISION": "3"
             })

    # Dissolve service area by Facility ID
    diss = dissolve(intLyr,
                    os.path.join(workspace, 'diss_servpop.shp'),
                    "FacilityID",
                    statistics="population SUM")

    # Get original Point FID from FacilityID
    calc_fld(diss, "pnt_fid", "[FacilityID] - 1", {
        "TYPE": "INTEGER",
        "LENGTH": "5",
        "PRECISION": None
    })

    dfPnt = tbl_to_obj(pointShp)
    dfDiss = tbl_to_obj(diss)

    dfDiss.rename(columns={"SUM_popula": "n_pessoas"}, inplace=True)

    resultDf = dfPnt.merge(dfDiss,
                           how='inner',
                           left_index=True,
                           right_on="pnt_fid")

    resultDf.drop('geometry_y', axis=1, inplace=True)

    resultDf = GeoDataFrame(resultDf,
                            crs={'init': 'epsg:{}'.format(epsg)},
                            geometry='geometry_x')

    df_to_shp(resultDf, output)

    return output
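
A sketch of how population_within_point_buffer might be called; all paths,
field names, and the EPSG code are illustrative assumptions:

# Hypothetical call: population within 10 minutes of each bus stop
population_within_point_buffer(
    r'C:\data\net.gdb\roads\roads_ND',  # netDataset
    'roads',                            # rdvName
    r'C:\data\bus_stops.shp',           # pointShp
    r'C:\data\bgri.shp',                # populationShp
    'N_INDIVID',                        # popField
    10,                                 # bufferDist (minutes, per flag below)
    3763,                               # epsg of the input data (assumed)
    r'C:\work\stops_pop.shp',           # output
    workspace=r'C:\work',
    bufferIsTimeMinutes=True,
    useOneway=True
)
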
Example #5
def pop_less_dist_x2(net_dataset,
                     rdv_name,
                     locations,
                     interval,
                     unities,
                     fld_groups,
                     fld_pop,
                     w,
                     output,
                     useOneway=None):
    """
    Network processing - executar service area de modo a conhecer as areas a
    menos de x minutos de qualquer coisa
    """

    import arcpy
    import numpy
    import os
    import pandas
    from gasp.cpu.arcg.lyr import feat_lyr
    from gasp.mng.genze import dissolve
    from gasp.cpu.arcg.anls.ovlay import intersect
    from gasp.cpu.arcg.mng.fld import calc_fld
    from gasp.mob.arctbx.svarea import service_area_polygon
    from gasp.fm import tbl_to_obj
    from gasp.oss import get_filename
    from gasp.to.shp import df_to_shp
    from gasp.cpu.arcg.mng.fld import del_field

    if arcpy.CheckExtension("Network") == "Available":
        arcpy.CheckOutExtension("Network")
    # Procedure #
    # Generate Service Area
    svArea = service_area_polygon(net_dataset,
                                  rdv_name,
                                  interval,
                                  locations,
                                  os.path.join(w, "servarea.shp"),
                                  ONEWAY_RESTRICTION=useOneway)

    # Dissolve Service Area
    svArea = dissolve(svArea,
                      os.path.join(w, 'svarea_diss.shp'),
                      "FID",
                      api="arcpy")

    # Intersect unities with Service Area
    lyr_unities = feat_lyr(unities)
    unities_servarea = intersect([lyr_unities, svArea],
                                 os.path.join(w, "unidades_mx.shp"))

    # In the original units SHP, create a column with the population
    # only for the units intersected with the service area
    intersectDf = tbl_to_obj(unities_servarea)

    unities_less_than = intersectDf[fld_pop].unique()
    unities_less_than = pandas.DataFrame(unities_less_than, columns=['cod_'])

    popDf = tbl_to_obj(unities)
    popDf = popDf.merge(unities_less_than,
                        how='outer',
                        left_on=fld_pop,
                        right_on="cod_")
    popDf["less_than"] = popDf.cod_.fillna(value='0')
    popDf["less_than"] = numpy.where(popDf["less_than"] != '0', '1', '0')
    popDf["population"] = numpy.where(popDf["less_than"] == '1',
                                      popDf[fld_pop], 0)
    popDf["original"] = popDf[fld_pop]

    newUnities = df_to_shp(popDf, os.path.join(w, 'unities_pop.shp'))

    # Dissolve and Get result
    result = dissolve(newUnities,
                      output,
                      fld_groups,
                      statistics="original SUM;population SUM",
                      api="arcpy")

    calc_fld(result, "pop_{}".format(interval), "[SUM_popula]", {
        "TYPE": "INTEGER",
        "LENGTH": "10",
        "PRECISION": ""
    })

    calc_fld(result, fld_pop, "[SUM_origin]", {
        "TYPE": "INTEGER",
        "LENGTH": "10",
        "PRECISION": ""
    })

    calc_fld(result, "pop_{}_p".format(interval),
             "([pop_{}] / [{}]) *100".format(interval, fld_pop), {
                 "TYPE": "DOUBLE",
                 "LENGTH": "6",
                 "PRECISION": "2"
             })

    del_field(result, "SUM_popula")
    del_field(result, "SUM_origin")

    return result
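
A possible call to pop_less_dist_x2, with assumed paths and field names:

# Hypothetical call: population living within 15 minutes of a pharmacy,
# aggregated by municipality
pop_less_dist_x2(
    r'C:\data\net.gdb\roads\roads_ND',  # net_dataset
    'roads',                            # rdv_name
    r'C:\data\pharmacies.shp',          # locations
    15,                                 # interval (minutes)
    r'C:\data\bgri.shp',                # unities
    'CONCELHO',                         # fld_groups
    'N_INDIVID',                        # fld_pop
    r'C:\work',                         # w
    r'C:\work\pop_less_15.shp',         # output
    useOneway=True
)
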
Example #6
def mean_time_by_influence_area(netDt,
                                rdv,
                                infraestruturas,
                                fld_infraestruturas,
                                unidades,
                                id_unidade,
                                conjuntos,
                                popf,
                                influence_areas_unities,
                                w,
                                output,
                                oneway=True):
    """
    Tempo medio ponderado pela populacao residente a infra-estrutura mais
    proxima (min), por area de influencia
    
    * netDt - Path to Network Dataset
    * infraestruturas - Points of destiny
    * fld_infraestruturas - Field on destiny points to relate with influence area
    * unidades - BGRI; Freg; Concelhos
    * conjuntos - Freg; Concelhos; NUT - field
    * popf - Field with the population of the statistic unity
    * influence_areas_unities - Field on statistic unities layer to relate
    with influence area
    * w = Workspace
    * output = Path to store the final output
    * rdv - Name of feature class with the streets network
    * junctions - Name of feature class with the junctions
    """

    import arcpy
    import os
    from gasp.cpu.arcg.lyr import feat_lyr
    from gasp.cpu.arcg.mng.feat import feat_to_pnt
    from gasp.cpu.arcg.mng.gen import merge
    from gasp.mng.gen import copy_feat
    from gasp.mng.genze import dissolve
    from gasp.cpu.arcg.mng.fld import add_field
    from gasp.cpu.arcg.mng.fld import calc_fld
    from gasp.cpu.arcg.mng.fld import field_statistics
    from gasp.cpu.arcg.mng.fld import type_fields
    from gasp.cpu.arcg.mng.joins import join_table
    from gasp.cpu.arcg.anls.exct import select_by_attr
    from gasp.cpu.arcg.netanlst.closest import closest_facility
    """if arcpy.CheckExtension("Network") == "Available":
        arcpy.CheckOutExtension("Network")
    
    else:
        raise ValueError('Network analyst extension is not avaiable')"""
    def ListGroupArea(lyr, fld_ia, fld_grp):
        d = {}
        cs = arcpy.SearchCursor(lyr)
        for lnh in cs:
            id_group = lnh.getValue(fld_grp)
            id_ia = lnh.getValue(fld_ia)
            if id_group not in d.keys():
                d[id_group] = [id_ia]
            else:
                if id_ia not in d[id_group]:
                    d[id_group].append(id_ia)
        return d

    arcpy.env.overwriteOutput = True
    arcpy.env.workspace = w

    # Procedure #
    copy_unities = copy_feat(unidades,
                             os.path.join(w, os.path.basename(unidades)),
                             gisApi='arcpy')

    # Generate centroids of the statistic unities - unidades
    lyr_unidades = feat_lyr(copy_unities)
    pnt_unidades = feat_to_pnt(lyr_unidades,
                               'pnt_unidades.shp',
                               pnt_position="INSIDE")
    # List all groups of unities (conjuntos)
    group_areas = ListGroupArea(lyr_unidades, influence_areas_unities,
                                conjuntos)
    # Create Layers
    lyr_pnt_unidades = feat_lyr(pnt_unidades)
    lyr_pnt_facilities = feat_lyr(infraestruturas)

    result_list = []

    fld_type_unities = type_fields(lyr_pnt_unidades, field=conjuntos)
    SELECT_UNITIES = '{fld}=\'{c}\'' if str(fld_type_unities) == 'String' \
        else '{fld}={c}'

    fld_type_facilities = type_fields(lyr_pnt_facilities,
                                      field=fld_infraestruturas)
    SELECT_FACILITIES = '{fld}=\'{obj}\'' if str(fld_type_facilities) == 'String' \
        else '{fld}={obj}'
    for group in group_areas.keys():
        # Select centroids of interest
        interest_centroids = select_by_attr(
            lyr_pnt_unidades, SELECT_UNITIES.format(c=str(group),
                                                    fld=conjuntos),
            'pnt_{c}.shp'.format(c=str(group)))
        # Select facilities of interest
        expression = ' OR '.join([
            SELECT_FACILITIES.format(fld=fld_infraestruturas,
                                     obj=str(group_areas[group][i]))
            for i in range(len(group_areas[group]))
        ])

        interest_facilities = select_by_attr(
            lyr_pnt_facilities, expression,
            'facilities_{c}.shp'.format(c=str(group)))
        # Run Closest Facility - distance between the selected centroids
        # and the selected facilities
        cls_fac_table = os.path.join(w, "clsf_{c}.dbf".format(c=str(group)))
        closest_facility(netDt,
                         rdv,
                         interest_facilities,
                         interest_centroids,
                         cls_fac_table,
                         oneway_restriction=oneway)
        add_field(cls_fac_table, 'j', "SHORT", "6")
        calc_fld(cls_fac_table, 'j', "[IncidentID]-1")
        join_table(interest_centroids, "FID", cls_fac_table, "j", "Total_Minu")
        # Calculate sum of time x population
        add_field(interest_centroids, 'sum', "DOUBLE", "10", "3")
        calc_fld(interest_centroids, 'sum',
                 "[{pop}]*[Total_Minu]".format(pop=popf))
        denominador = field_statistics(interest_centroids, 'sum', 'SUM')
        add_field(interest_centroids, 'tm', "DOUBLE", "10", "3")
        calc_fld(
            interest_centroids, 'tm',
            "([sum]/{sumatorio})*[Total_Minu]".format(
                sumatorio=str(denominador)))
        result_list.append(interest_centroids)

    merge_shp = merge(result_list, "merge_centroids.shp")
    join_table(lyr_unidades, id_unidade, "merge_centroids.shp", id_unidade,
               "tm")

    return dissolve(lyr_unidades,
                    output,
                    conjuntos,
                    statistics="tm SUM",
                    api='arcpy')
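
A hedged sketch of mean_time_by_influence_area usage; every path and field
name is an assumption made for illustration:

# Hypothetical call: weighted mean time to the nearest hospital, computed
# separately for each influence area and aggregated by parish
mean_time_by_influence_area(
    r'C:\data\net.gdb\roads\roads_ND',  # netDt
    'roads',                            # rdv
    r'C:\data\hospitals.shp',           # infraestruturas
    'ia_id',                            # fld_infraestruturas (assumed field)
    r'C:\data\bgri.shp',                # unidades
    'BGRI11',                           # id_unidade (assumed field)
    'FREG',                             # conjuntos
    'N_INDIVID',                        # popf
    'ia_id',                            # influence_areas_unities
    r'C:\work',                         # w
    r'C:\work\mean_time_ia.shp',        # output
    oneway=True
)
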
Example #7
def check_shape_diff(SHAPES_TO_COMPARE,
                     OUT_FOLDER,
                     REPORT,
                     conPARAM,
                     DB,
                     SRS_CODE,
                     GIS_SOFTWARE="GRASS",
                     GRASS_REGION_TEMPLATE=None):
    """
    Script to check differences between pairs of Feature Classes
    
    Suponha que temos diversas Feature Classes (FC) e que cada uma delas
    possui um determinado atributo; imagine também que,
    considerando todos os pares possíveis entre estas FC,
    se pretende comparar as diferenças na distribuição dos valores
    desse atributo em cada par.
    
    * Dependências:
    - ArcGIS;
    - GRASS;
    - PostgreSQL;
    - PostGIS.
    
    * GIS_SOFTWARE Options:
    - ARCGIS;
    - GRASS.
    """

    import datetime
    import os
    import pandas
    from gasp.fm.sql import query_to_df
    from gasp.sql.mng.tbl import tbls_to_tbl
    from gasp.sql.mng.geom import fix_geom, check_geomtype_in_table
    from gasp.sql.mng.geom import select_main_geom_type
    from gasp.sql.mng.qw import ntbl_by_query
    from gasp.prop.ff import check_isRaster
    from gasp.oss import get_filename
    from gasp.sql.mng.db import create_db
    from gasp.to.sql import shp_to_psql, df_to_db
    from gasp.to.shp import rst_to_polyg
    from gasp.to.shp import shp_to_shp, psql_to_shp
    from gasp.to import db_to_tbl
    # NOTE: union and optimized_union_anls are used below but were not
    # imported; this import path is an assumption
    from gasp.anls.ovlay import union, optimized_union_anls

    # Create the output folder; stop if it already exists
    if not os.path.exists(OUT_FOLDER):
        from gasp.oss.ops import create_folder
        create_folder(OUT_FOLDER, overwrite=None)
    else:
        raise ValueError('{} already exists!'.format(OUT_FOLDER))

    # Start GRASS GIS Session if GIS_SOFTWARE == GRASS
    if GIS_SOFTWARE == "GRASS":
        if not GRASS_REGION_TEMPLATE:
            raise ValueError(
                'To use GRASS GIS you need to specify GRASS_REGION_TEMPLATE')

        from gasp.session import run_grass

        gbase = run_grass(OUT_FOLDER,
                          grassBIN='grass76',
                          location='shpdif',
                          srs=GRASS_REGION_TEMPLATE)

        import grass.script as grass
        import grass.script.setup as gsetup

        gsetup.init(gbase, OUT_FOLDER, 'shpdif', 'PERMANENT')

        from gasp.mng.grstbl import rename_col
        from gasp.to.shp.grs import shp_to_grs, grs_to_shp
        from gasp.to.rst import rst_to_grs
        from gasp.mng.fld import rename_column

    # Convert to SHAPE if file is Raster
    # Import to GRASS GIS if GIS SOFTWARE == GRASS
    i = 0
    _SHP_TO_COMPARE = {}
    for s in SHAPES_TO_COMPARE:
        isRaster = check_isRaster(s)

        if isRaster:
            if GIS_SOFTWARE == "ARCGIS":
                d = rst_to_polyg(s,
                                 os.path.join(os.path.dirname(s),
                                              get_filename(s) + '.shp'),
                                 gisApi='arcpy')

                _SHP_TO_COMPARE[d] = "gridcode"

            elif GIS_SOFTWARE == "GRASS":
                # To GRASS
                rstName = get_filename(s)
                inRst = rst_to_grs(s, "rst_" + rstName, as_cmd=True)
                # Raster to vector
                d = rst_to_polyg(inRst,
                                 rstName,
                                 rstColumn="lulc_{}".format(i),
                                 gisApi="grasscmd")

                # Export Shapefile
                shp = grs_to_shp(d, os.path.join(OUT_FOLDER, d + '.shp'),
                                 "area")

                _SHP_TO_COMPARE[shp] = "lulc_{}".format(i)

        else:
            if GIS_SOFTWARE == "ARCGIS":
                _SHP_TO_COMPARE[s] = SHAPES_TO_COMPARE[s]

            elif GIS_SOFTWARE == "GRASS":
                # To GRASS
                grsV = shp_to_grs(s, get_filename(s), asCMD=True)

                # Change name of column with comparing value
                rename_col(grsV,
                           SHAPES_TO_COMPARE[s],
                           "lulc_{}".format(i),
                           as_cmd=True)

                # Export
                shp = grs_to_shp(grsV,
                                 os.path.join(OUT_FOLDER, grsV + '_rn.shp'),
                                 "area")

                _SHP_TO_COMPARE[shp] = "lulc_{}".format(i)

        i += 1

    SHAPES_TO_COMPARE = _SHP_TO_COMPARE
    if GIS_SOFTWARE == "ARCGIS":
        from gasp.cpu.arcg.mng.fld import calc_fld
        from gasp.cpu.arcg.mng.wspace import create_geodb
        from gasp.mng.gen import copy_feat

        # Sanitize data and Add new field
        __SHAPES_TO_COMPARE = {}
        i = 0

        # Create GeoDatabase
        geodb = create_geodb(OUT_FOLDER, 'geo_sanitize')
        """ Sanitize Data """
        for k in SHAPES_TO_COMPARE:
            # Send data to GeoDatabase only to sanitize
            newFc = shp_to_shp(k,
                               os.path.join(geodb, get_filename(k)),
                               gisApi='arcpy')

            # Create a copy to change
            newShp = copy_feat(newFc,
                               os.path.join(OUT_FOLDER, os.path.basename(k)),
                               gisApi='arcpy')

            # Sanitize field name with interest data
            NEW_FLD = "lulc_{}".format(i)
            calc_fld(newShp,
                     NEW_FLD,
                     "[{}]".format(SHAPES_TO_COMPARE[k]),
                     isNewField={
                         "TYPE": "INTEGER",
                         "LENGTH": 5,
                         "PRECISION": ""
                     })

            __SHAPES_TO_COMPARE[newShp] = NEW_FLD

            i += 1

    else:
        __SHAPES_TO_COMPARE = SHAPES_TO_COMPARE

    # Create database
    conPARAM["DATABASE"] = create_db(conPARAM, DB)
    """ Union SHAPEs """

    UNION_SHAPE = {}
    FIX_GEOM = {}

    def fix_geometry(shp):
        # Send data to PostgreSQL
        nt = shp_to_psql(conPARAM, shp, SRS_CODE, api='shp2pgsql')

        # Fix data
        corr_tbl = fix_geom(conPARAM,
                            nt,
                            "geom",
                            "corr_{}".format(nt),
                            colsSelect=['gid', __SHAPES_TO_COMPARE[shp]])

        # Check if we have multiple geometries
        geomN = check_geomtype_in_table(conPARAM, corr_tbl)

        if geomN > 1:
            corr_tbl = select_main_geom_type(conPARAM, corr_tbl,
                                             "corr2_{}".format(nt))

        # Export data again
        newShp = psql_to_shp(conPARAM,
                             corr_tbl,
                             os.path.join(OUT_FOLDER, corr_tbl + '.shp'),
                             api='pgsql2shp',
                             geom_col='geom')

        return newShp

    SHPS = list(__SHAPES_TO_COMPARE.keys())
    for i in range(len(SHPS)):
        for e in range(i + 1, len(SHPS)):
            if GIS_SOFTWARE == 'ARCGIS':
                # Union the pair of shapefiles
                unShp = union(SHPS[i],
                              SHPS[e],
                              os.path.join(OUT_FOLDER,
                                           "un_{}_{}.shp".format(i, e)),
                              api_gis="arcpy")

                # See if the union went all right
                if not os.path.exists(unShp):
                    # Union did not go well

                    # See if geometry was already fixed
                    if SHPS[i] not in FIX_GEOM:
                        # Fix SHPS[i] geometry
                        FIX_GEOM[SHPS[i]] = fix_geometry(SHPS[i])

                    if SHPS[e] not in FIX_GEOM:
                        FIX_GEOM[SHPS[e]] = fix_geometry(SHPS[e])

                    # Run Union again
                    unShp = union(FIX_GEOM[SHPS[i]],
                                  FIX_GEOM[SHPS[e]],
                                  os.path.join(OUT_FOLDER,
                                               "un_{}_{}_f.shp".format(i, e)),
                                  api_gis="arcpy")

            elif GIS_SOFTWARE == "GRASS":
                # Optimized Union
                print "Union between {} and {}".format(SHPS[i], SHPS[e])
                time_a = datetime.datetime.now().replace(microsecond=0)
                __unShp = optimized_union_anls(
                    SHPS[i],
                    SHPS[e],
                    os.path.join(OUT_FOLDER, "un_{}_{}.shp".format(i, e)),
                    GRASS_REGION_TEMPLATE,
                    SRS_CODE,
                    os.path.join(OUT_FOLDER, "work_{}_{}".format(i, e)),
                    multiProcess=True)
                time_b = datetime.datetime.now().replace(microsecond=0)
                print time_b - time_a

                # Rename cols
                unShp = rename_column(
                    __unShp, {
                        "a_" + __SHAPES_TO_COMPARE[SHPS[i]]:
                        __SHAPES_TO_COMPARE[SHPS[i]],
                        "b_" + __SHAPES_TO_COMPARE[SHPS[e]]:
                        __SHAPES_TO_COMPARE[SHPS[e]]
                    }, os.path.join(OUT_FOLDER, "un_{}_{}_rn.shp".format(i,
                                                                         e)))

            UNION_SHAPE[(SHPS[i], SHPS[e])] = unShp

    # Send data one more time to postgresql
    SYNTH_TBL = {}

    for uShp in UNION_SHAPE:
        # Send data to PostgreSQL
        union_tbl = shp_to_psql(conPARAM,
                                UNION_SHAPE[uShp],
                                SRS_CODE,
                                api='shp2pgsql')

        # Produce table with % of area equal in both maps
        areaMapTbl = ntbl_by_query(
            conPARAM,
            "{}_syn".format(union_tbl),
            ("SELECT CAST('{lulc_1}' AS text) AS lulc_1, "
             "CAST('{lulc_2}' AS text) AS lulc_2, "
             "round("
             "CAST(SUM(g_area) / 1000000 AS numeric), 4"
             ") AS agree_area, round("
             "CAST((SUM(g_area) / MIN(total_area)) * 100 AS numeric), 4"
             ") AS agree_percentage, "
             "round("
             "CAST(MIN(total_area) / 1000000 AS numeric), 4"
             ") AS total_area FROM ("
             "SELECT {map1_cls}, {map2_cls}, ST_Area(geom) AS g_area, "
             "CASE "
             "WHEN {map1_cls} = {map2_cls} "
             "THEN 1 ELSE 0 "
             "END AS isthesame, total_area FROM {tbl}, ("
             "SELECT SUM(ST_Area(geom)) AS total_area FROM {tbl}"
             ") AS foo2"
             ") AS foo WHERE isthesame = 1 "
             "GROUP BY isthesame").format(
                 lulc_1=get_filename(uShp[0]),
                 lulc_2=get_filename(uShp[1]),
                 map1_cls=__SHAPES_TO_COMPARE[uShp[0]],
                 map2_cls=__SHAPES_TO_COMPARE[uShp[1]],
                 tbl=union_tbl),
            api='psql')

        # Produce confusion matrix for the pair in comparison
        lulcCls = query_to_df(
            conPARAM,
            ("SELECT fcol FROM ("
             "SELECT CAST({map1_cls} AS text) AS fcol FROM {tbl} "
             "GROUP BY {map1_cls} "
             "UNION ALL SELECT CAST({map2_cls} AS text) FROM {tbl} "
             "GROUP BY {map2_cls}"
             ") AS foo GROUP BY fcol ORDER BY fcol").format(
                 tbl=union_tbl,
                 map1_cls=__SHAPES_TO_COMPARE[uShp[0]],
                 map2_cls=__SHAPES_TO_COMPARE[uShp[1]]),
            db_api='psql').fcol.tolist()

        matrixTbl = ntbl_by_query(
            conPARAM,
            "{}_matrix".format(union_tbl),
            ("SELECT * FROM crosstab('"
             "SELECT CASE "
             "WHEN foo.{map1_cls} IS NOT NULL "
             "THEN foo.{map1_cls} ELSE jtbl.flyr "
             "END AS lulc1_cls, CASE "
             "WHEN foo.{map2_cls} IS NOT NULL "
             "THEN foo.{map2_cls} ELSE jtbl.slyr "
             "END AS lulc2_cls, CASE "
             "WHEN foo.garea IS NOT NULL "
             "THEN round(CAST(foo.garea / 1000000 AS numeric)"
             ", 3) ELSE 0 "
             "END AS garea FROM ("
             "SELECT CAST({map1_cls} AS text) AS {map1_cls}, "
             "CAST({map2_cls} AS text) AS {map2_cls}, "
             "SUM(ST_Area(geom)) AS garea "
             "FROM {tbl} GROUP BY {map1_cls}, {map2_cls}"
             ") AS foo FULL JOIN ("
             "SELECT f.flyr, s.slyr FROM ("
             "SELECT CAST({map1_cls} AS text) AS flyr "
             "FROM {tbl} GROUP BY {map1_cls}"
             ") AS f, ("
             "SELECT CAST({map2_cls} AS text) AS slyr "
             "FROM {tbl} GROUP BY {map2_cls}"
             ") AS s"
             ") AS jtbl "
             "ON foo.{map1_cls} = jtbl.flyr AND "
             "foo.{map2_cls} = jtbl.slyr "
             "ORDER BY 1,2"
             "') AS ct("
             "lulc_cls text, {crossCols}"
             ")").format(crossCols=", ".join(
                 ["cls_{} numeric".format(c) for c in lulcCls]),
                         tbl=union_tbl,
                         map1_cls=__SHAPES_TO_COMPARE[uShp[0]],
                         map2_cls=__SHAPES_TO_COMPARE[uShp[1]]),
            api='psql')

        SYNTH_TBL[uShp] = {"TOTAL": areaMapTbl, "MATRIX": matrixTbl}

    # UNION ALL TOTAL TABLES
    total_table = tbls_to_tbl(conPARAM,
                              [SYNTH_TBL[k]["TOTAL"] for k in SYNTH_TBL],
                              'total_table')

    # Create table with % of agreement between each pair of maps
    mapsNames = query_to_df(
        conPARAM,
        ("SELECT lulc FROM ("
         "SELECT lulc_1 AS lulc FROM {tbl} GROUP BY lulc_1 "
         "UNION ALL "
         "SELECT lulc_2 AS lulc FROM {tbl} GROUP BY lulc_2"
         ") AS lu GROUP BY lulc ORDER BY lulc").format(tbl=total_table),
        db_api='psql').lulc.tolist()

    FLDS_TO_PIVOT = ["agree_percentage", "total_area"]

    Q = ("SELECT * FROM crosstab('"
         "SELECT CASE "
         "WHEN foo.lulc_1 IS NOT NULL THEN foo.lulc_1 ELSE jtbl.tmp1 "
         "END AS lulc_1, CASE "
         "WHEN foo.lulc_2 IS NOT NULL THEN foo.lulc_2 ELSE jtbl.tmp2 "
         "END AS lulc_2, CASE "
         "WHEN foo.{valCol} IS NOT NULL THEN foo.{valCol} ELSE 0 "
         "END AS agree_percentage FROM ("
         "SELECT lulc_1, lulc_2, {valCol} FROM {tbl} UNION ALL "
         "SELECT lulc_1, lulc_2, {valCol} FROM ("
         "SELECT lulc_1 AS lulc_2, lulc_2 AS lulc_1, {valCol} "
         "FROM {tbl}"
         ") AS tst"
         ") AS foo FULL JOIN ("
         "SELECT lulc_1 AS tmp1, lulc_2 AS tmp2 FROM ("
         "SELECT lulc_1 AS lulc_1 FROM {tbl} GROUP BY lulc_1 "
         "UNION ALL "
         "SELECT lulc_2 AS lulc_1 FROM {tbl} GROUP BY lulc_2"
         ") AS tst_1, ("
         "SELECT lulc_1 AS lulc_2 FROM {tbl} GROUP BY lulc_1 "
         "UNION ALL "
         "SELECT lulc_2 AS lulc_2 FROM {tbl} GROUP BY lulc_2"
         ") AS tst_2 WHERE lulc_1 = lulc_2 GROUP BY lulc_1, lulc_2"
         ") AS jtbl ON foo.lulc_1 = jtbl.tmp1 AND foo.lulc_2 = jtbl.tmp2 "
         "ORDER BY lulc_1, lulc_2"
         "') AS ct("
         "lulc_map text, {crossCols}"
         ")")

    TOTAL_AGREE_TABLE = None
    TOTAL_AREA_TABLE = None
    for f in FLDS_TO_PIVOT:
        if not TOTAL_AGREE_TABLE:
            TOTAL_AGREE_TABLE = ntbl_by_query(
                conPARAM,
                "agreement_table",
                Q.format(tbl=total_table,
                         valCol=f,
                         crossCols=", ".join([
                             "{} numeric".format(map_) for map_ in mapsNames
                         ])),
                api='psql')

        else:
            TOTAL_AREA_TABLE = ntbl_by_query(
                conPARAM,
                "area_table",
                Q.format(tbl=total_table,
                         valCol=f,
                         crossCols=", ".join([
                             "{} numeric".format(map_) for map_ in mapsNames
                         ])),
                api='psql')

    # Union Mapping
    UNION_MAPPING = pandas.DataFrame(
        [[
            get_filename(k[0]),
            get_filename(k[1]),
            get_filename(UNION_SHAPE[k])
        ] for k in UNION_SHAPE],
        columns=['shp_a', 'shp_b', 'union_shp'
                 ]) if GIS_SOFTWARE == "ARCGIS" else pandas.DataFrame(
                     [[k[0], k[1], get_filename(UNION_SHAPE[k])]
                      for k in UNION_SHAPE],
                     columns=['shp_a', 'shp_b', 'union_shp'])

    UNION_MAPPING = df_to_db(conPARAM, UNION_MAPPING, 'union_map', api='psql')

    # Export Results
    TABLES = [UNION_MAPPING, TOTAL_AGREE_TABLE, TOTAL_AREA_TABLE
              ] + [SYNTH_TBL[x]["MATRIX"] for x in SYNTH_TBL]

    SHEETS = ["union_map", "agreement_percentage", "area_with_data_km"] + [
        "{}_{}".format(get_filename(x[0])[:15],
                       get_filename(x[1])[:15]) for x in SYNTH_TBL
    ]

    # Export all tables to the XLSX report (db_to_tbl is imported above)
    db_to_tbl(conPARAM, ["SELECT * FROM {}".format(x) for x in TABLES],
              REPORT, sheetsNames=SHEETS, dbAPI='psql')

    return REPORT
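
A sketch of a check_shape_diff call; the connection keys, paths, column
names, DB name, and SRS code are all assumptions for illustration:

# Hypothetical PostgreSQL connection parameters
con = {
    "HOST": "localhost", "PORT": "5432",
    "USER": "postgres", "PASSWORD": "admin"
}

# Compare the class columns of two land-use maps using the GRASS backend
check_shape_diff(
    {
        r'C:\lulc\cos_2015.shp': 'cls_cos',  # feature class -> attribute
        r'C:\lulc\clc_2018.shp': 'cls_clc'
    },
    r'C:\work\shpdiff',                      # OUT_FOLDER (must not exist yet)
    r'C:\work\diff_report.xlsx',             # REPORT
    con, 'diff_db', 3763,                    # conPARAM, DB, SRS_CODE
    GIS_SOFTWARE="GRASS",
    GRASS_REGION_TEMPLATE=r'C:\lulc\cos_2015.shp'
)
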
Example #8
def col_to_map_mlyr(main_mxd, lyrs_src, lyrs_joker, mapColsTbl, graphicsTbl,
                    out_maps, mapsFormat='.jpg', out_mxd=None):
    """
    Same as col_to_map, but this one allows changing multiple layers
    
    lyrs_src = {
        "lyr1_name" : r'/path/to/datasource.shp',
        "lyr2_name" : r'/path/to/datasource.shp'
        ...
    }
    
    lyrs_joker = {
        "lyr1_name" : "joker_field_name",
        "lyr2_name" : "joker_field_name"
    }
    
    mapColsTbl = path to table with the relation between columns and maps
                | lyr_1_name | ... | lyr_n_name 
     map_alias1 |  col_name  | ... |  col_name
     map_alias2 |  col_name  | ... |  col_name
     
    graphicsTbl = path to table with the relation between maps and the values
    to assign to graphic elements
                | element_1 | ... | element_n
     map_alias1 | some_text | ... | some_text
     map_alias2 | some_text | ... | some_text
    """
    
    import arcpy
    import os
    from gasp.fm                import tbl_to_obj
    from gasp.cpu.arcg.mng.fld  import calc_fld
    from gasp.maps.arctbx.graph import get_elem_by_name
    from gasp.maps.arctbx.wmap  import mxd_saveas
    from gasp.maps.arctbx.wmap  import write_map
    
    mapsFormat = mapsFormat if mapsFormat[0] == '.' else '.' + mapsFormat
    
    # Get maps to do
    mapsToDo = tbl_to_obj(
        mapColsTbl, useFirstColAsIndex=True, output='dict'
    )
    
    # Get dict with association between maps and graphics elements
    graphicElem = tbl_to_obj(
        graphicsTbl, useFirstColAsIndex=True, output='dict'
    )
    
    # Create new maps
    for newMap in mapsToDo:
        print "Producing {} Map".format(newMap)
        
        # Update JOKER's FIELD's
        for lyr in mapsToDo[newMap]:
            calc_fld(
                lyrs_src[lyr], lyrs_joker[lyr],
                "[{}]".format(str(mapsToDo[newMap][lyr]))
            )
        
        # Open mxd and change it
        mxd = arcpy.mapping.MapDocument(main_mxd)
        
        # Get Text elements in the main mxd to be changed
        elem_names = graphicElem[newMap].keys()
        text_elem  = get_elem_by_name(mxd, elem_names)
        
        if not text_elem:
            print 'No graphic/text element was found'
        
        for e in text_elem:
            text_elem[e].text = graphicElem[newMap][e]
        
        outmap = write_map(mxd, os.path.join(out_maps, newMap + mapsFormat))
        
        # Save a copy of the mxd, if requested
        if out_mxd:
            mxd_saveas(mxd, os.path.join(out_mxd, newMap + '.mxd'))
        
        # Close mxd
        del mxd
        
        print "{} Map is done!".format(newMap)
Example #9
def col_to_map(main_mxd, lyrs_sourcedata, joker_field, layout_elm_file,
               mapsDir, mxdDir=None, maps_format='.jpg', includeMxdNameInMap=None):
    """
    TODO: ADD A MORE DETAILED DESCRIPTION
    
    WORK AROUND for col_to_map_v1
    if we change the valueField, the symbology will be reseted. So, lets
    change the values of the valueField
    
    GOAL: Create a map for every column in a layer - Use if you want
    to mantain layers symbology including colors and inverval breaks.
    
    * main_mxd = path to a mxd template (all maps produced will be like
    this template);
    
    * lyrs_sourcedata = path to the data source of the layers
    that will be changed from map to map;
    
    * joker_field = name of the field that will store the values to be
    mapped (same field but different values);
    
    * layout_elm_file = file with the indication of what text should appear
    in each text element of the layout when some column is used as value
    field;
    eg.,
    
                    | id_element_1 | ... | id_element_n
    col_value_name1 |  some_text   | ... |  some_text
    col_value_name2 |  some_text   | ... |  some_text
    
    -> The goal is to mantain classes values and number of classes and colors
     - change only value field.
    """
    
    import arcpy
    import os
    from gasp.fm                import tbl_to_obj
    from gasp.cpu.arcg.mng.fld  import calc_fld
    from gasp.maps.arctbx.graph import get_elem_by_name
    from gasp.maps.arctbx.wmap  import mxd_saveas
    from gasp.maps.arctbx.wmap  import write_map
    
    if includeMxdNameInMap:
        from gasp.oss import get_filename
        mapBaseName = get_filename(main_mxd, forceLower=True) + "_"
    
    else:
        mapBaseName = ""
    
    maps_format = '.' + maps_format if maps_format[0] != '.' else maps_format
    
    # List columns to be mapped and graphic elements to be changed
    cols_and_elements = tbl_to_obj(
        layout_elm_file, useFirstColAsIndex=True, output='dict'
    )
    
    for col in cols_and_elements:
        print "Producing {} Map".format(col)
        # Update JOKER FIELD
        calc_fld(
            lyrs_sourcedata, joker_field, "[{}]".format(str(col))
        )
        
        # Open mxd and change it
        mxd = arcpy.mapping.MapDocument(main_mxd)
        
        # Get Text elements in the main mxd to be changed
        elem_names = cols_and_elements[cols_and_elements.keys()[0]].keys()
        text_elem  = get_elem_by_name(mxd, elem_names)
        
        if not text_elem:
            print 'No graphic/text element was found'
        
        # Change every text elements
        for elm_name in text_elem:
            text_elem[elm_name].text = cols_and_elements[col][elm_name]
        
        outmap = write_map(mxd, os.path.join(
            mapsDir, "{}{}{}".format(mapBaseName, col, maps_format)
        ))
        
        # Save a copy of the mxd, if requested
        if mxdDir:
            mxd_saveas(mxd, os.path.join(mxdDir, "{}{}{}".format(
                mapBaseName, col, '.mxd'
            )))
        
        # Close mxd
        del mxd
        
        print "{} Map is done!".format(col)