Example 1
def extract_random_features(inshp, nfeat, outshp, is_percentage=None):
    """
    Extract Random features from one Feature Class
    and save them in a new file
    """

    import numpy as np
    from glass.g.rd.shp import shp_to_obj
    from glass.g.wt.shp import obj_to_shp
    from glass.g.prop.prj import get_shp_epsg

    # Open data
    df = shp_to_obj(inshp)

    # Get number of random features
    n = int(round(nfeat * df.shape[0] / 100, 0)) if is_percentage else nfeat

    # Get random sample
    df['idx'] = df.index
    rnd = np.random.choice(df.idx, n, replace=False)

    # Filter features (use a copy to avoid pandas SettingWithCopyWarning)
    rnd_df = df[df.idx.isin(rnd)].copy()

    rnd_df.drop('idx', axis=1, inplace=True)

    # Save result
    epsg = get_shp_epsg(inshp)
    return obj_to_shp(rnd_df, 'geometry', epsg, outshp)
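
A minimal usage sketch; the shapefile paths and sample sizes below are hypothetical:

# Hypothetical call: draw a random 25% of the features of 'parcels.shp'
extract_random_features('parcels.shp', 25, 'parcels_sample.shp', is_percentage=True)

# Hypothetical call: draw exactly 100 random features
extract_random_features('parcels.shp', 100, 'parcels_100.shp')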
Example 2
def otp_closest_facility(incidents, facilities, hourday, date, output):
    """
    Closest Facility using OTP
    """

    import os
    from glass.g.rd.shp import shp_to_obj
    from glass.g.prop.prj import get_shp_epsg
    from glass.g.wt.shp import obj_to_shp
    from glass.pys.oss import fprop
    from glass.g.prj.obj import df_prj
    from glass.g.mob.otp.log import clsfacility

    # Open Data
    incidents_df = df_prj(shp_to_obj(incidents), 4326)
    facilities_df = df_prj(shp_to_obj(facilities), 4326)

    # Run closest facility
    out_epsg = get_shp_epsg(incidents)
    res, logs = clsfacility(incidents_df,
                            facilities_df,
                            hourday,
                            date,
                            out_epsg=out_epsg)

    # Export result
    obj_to_shp(res, "geom", out_epsg, output)

    # Write logs
    if len(logs):
        with open(
                os.path.join(os.path.dirname(output),
                             fprop(output, 'fn') + '_log.txt'), 'w') as txt:
            for i in logs:
                txt.write(("Incident_id: {}\n"
                           "Facility_id: {}\n"
                           "ERROR message:\n"
                           "{}\n"
                           "\n\n\n\n\n\n").format(str(i[0]), str(i[1]),
                                                  str(i[2])))

    return output
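
A hedged usage sketch; the paths, hour and date values are hypothetical, and their exact
format must match what glass.g.mob.otp.log.clsfacility expects:

# Hypothetical call: closest facility for each incident at 08:00 on a given date
otp_closest_facility('incidents.shp', 'hospitals.shp', '08:00', '2021-06-01', 'closest.shp')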
Example 3
def count_pntinpol(inpnt, inpoly, cntcol, out):
    """
    Count points inside polygons
    """

    from glass.g.gp.ovl.obj import count_pnt_inside_poly
    from glass.g.rd.shp import shp_to_obj
    from glass.g.wt.shp import obj_to_shp
    from glass.g.prop.prj import get_shp_epsg

    # Open data
    pnt_df = shp_to_obj(inpnt)
    pol_df = shp_to_obj(inpoly)

    # Count points
    pol_df = count_pnt_inside_poly(pnt_df, cntcol, pol_df)

    # Export to file
    obj_to_shp(pol_df, "geometry", get_shp_epsg(inpoly), out)

    return out
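
A minimal usage sketch with hypothetical file and column names:

# Hypothetical call: count the points of 'trees.shp' falling inside each polygon of 'blocks.shp'
count_pntinpol('trees.shp', 'blocks.shp', 'n_trees', 'blocks_with_count.shp')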
Example 4
File: pop.py Project: jasp382/glass
def points_by_polutation(pnt,
                         mapunits,
                         popcol,
                         outcol,
                         output,
                         count_pnt=None,
                         inhabitants=1000,
                         pntattr=None):
    """
    Useful to calculate pharmacies by 1000 inabitants
    """

    import geopandas as gp
    from glass.g.rd.shp import shp_to_obj
    from glass.g.prop.prj import get_shp_epsg
    from glass.g.wt.shp import obj_to_shp
    from glass.g.gp.ovl.obj import count_pnt_inside_poly

    # Open Data
    pnt_df = shp_to_obj(pnt)
    units_df = shp_to_obj(mapunits)

    # Sanitize optional parameters
    cpnt = 'count_pnt' if not count_pnt else count_pnt
    pntattr = None if not pntattr else pntattr \
        if pntattr in list(pnt_df.columns.values) else None
    inhabitants = 1 if not inhabitants else inhabitants

    # Count points inside each map unit and compute the indicator
    units_df = count_pnt_inside_poly(pnt_df, cpnt, units_df, pntattr=pntattr)
    units_df[outcol] = (units_df[cpnt] / units_df[popcol]) * inhabitants

    if not count_pnt:
        units_df.drop([cpnt], axis=1, inplace=True)

    obj_to_shp(units_df, "geometry", get_shp_epsg(mapunits), output)

    return output
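
A minimal usage sketch; all paths and column names are hypothetical:

# Hypothetical call: pharmacies per 1000 inhabitants of each parish
points_by_polutation('pharmacies.shp', 'parishes.shp', 'pop_res', 'pharm_1000hab',
                     'parishes_indicator.shp')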
Example 5
def otp_cf_based_on_rel(incidents, group_incidents_col, facilities,
                        facilities_id, rel_inc_fac, sheet, group_fk,
                        facilities_fk, hour, day, output):
    """
    Calculate time travel considering specific facilities
    for each group of incidents

    Relations between incidents and facilities are in a auxiliar table (rel_inc_fac).
    Auxiliar table must be a xlsx file
    """

    import os
    import pandas as pd
    from glass.ng.rd import tbl_to_obj
    from glass.g.rd.shp import shp_to_obj
    from glass.g.wt.shp import obj_to_shp
    from glass.g.mob.otp.log import clsfacility
    from glass.g.prop.prj import get_shp_epsg
    from glass.ng.pd import merge_df
    from glass.pys.oss import fprop
    from glass.g.prj.obj import df_prj

    # Open data
    idf = df_prj(shp_to_obj(incidents), 4326)
    fdf = df_prj(shp_to_obj(facilities), 4326)

    rel_df = tbl_to_obj(rel_inc_fac, sheet=sheet)

    # Avoid problems when facilities_id == facilities_fk:
    # rename the fk column in the relation table before merging
    if facilities_id == facilities_fk:
        rel_df.rename(columns={facilities_fk: facilities_fk + '_fk'},
                      inplace=True)
        facilities_fk = facilities_fk + '_fk'

    oepsg = get_shp_epsg(incidents)

    # Relate facilities with incidents groups
    fdf = fdf.merge(rel_df,
                    how='inner',
                    left_on=facilities_id,
                    right_on=facilities_fk)

    # List Groups
    grp_df = pd.DataFrame({
        'cnttemp':
        idf.groupby([group_incidents_col])[group_incidents_col].agg('count')
    }).reset_index()

    # Do calculations
    res = []
    logs = []
    for idx, row in grp_df.iterrows():
        # Get incidents for that group
        new_i = idf[idf[group_incidents_col] == row[group_incidents_col]]

        # Get facilities for that group
        new_f = fdf[fdf[group_fk] == row[group_incidents_col]]

        # calculate closest facility
        cfres, l = clsfacility(new_i, new_f, hour, day, out_epsg=oepsg)

        res.append(cfres)
        logs.extend(l)

    # Merge results
    out_df = merge_df(res)

    # Recover facility id
    fdf.drop([c for c in fdf.columns.values if c != facilities_id],
             axis=1,
             inplace=True)
    out_df = out_df.merge(fdf, how='left', left_on='ffid', right_index=True)

    # Export result
    obj_to_shp(out_df, "geom", oepsg, output)

    # Write logs
    if len(logs) > 0:
        with open(
                os.path.join(os.path.dirname(output),
                             fprop(output, 'fn') + '_log.txt'), 'w') as txt:
            for i in logs:
                txt.write(("Incident_id: {}\n"
                           "Facility_id: {}\n"
                           "ERROR message:\n"
                           "{}\n"
                           "\n\n\n\n\n\n").format(str(i[0]), str(i[1]),
                                                  str(i[2])))

    return output
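
A hedged usage sketch; every path, sheet name, column name, hour and date value below
is hypothetical:

otp_cf_based_on_rel('incidents.shp', 'district', 'hospitals.shp', 'hosp_id',
                    'relations.xlsx', 'Sheet1', 'district_fk', 'hosp_fk',
                    '08:00', '2021-06-01', 'closest_by_group.shp')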
Example 6
def otp_servarea(facilities, hourday, date, breaks, output, vel=None):
    """
    OTP Service Area
    """

    import requests
    import os
    from glass.cons.otp import ISO_URL
    from glass.g.rd.shp import shp_to_obj
    from glass.g.prj.obj import df_prj
    from glass.g.prop.prj import get_shp_epsg
    from glass.g.wt.shp import obj_to_shp
    from glass.pys.oss import fprop
    from glass.g.it.pd import json_obj_to_geodf
    from glass.ng.pd import merge_df
    from glass.pys import obj_to_lst

    breaks = obj_to_lst(breaks)

    # Open Data
    facilities_df = df_prj(shp_to_obj(facilities), 4326)

    # Place request parameters
    get_params = [('mode', 'WALK,TRANSIT'), ('date', date), ('time', hourday),
                  ('maxWalkDistance', 50000),
                  ('walkSpeed', 3 if not vel else vel)]

    breaks.sort()

    for b in breaks:
        get_params.append(('cutoffSec', b))

    # Do the math
    error_logs = []
    results = []

    for i, r in facilities_df.iterrows():
        fromPlace = str(r.geometry.y) + ',' + str(r.geometry.x)

        # Append the fromPlace parameter on the first iteration;
        # on later iterations just overwrite the last parameter
        if not i:
            get_params.append(('fromPlace', fromPlace))
        else:
            get_params[-1] = ('fromPlace', fromPlace)

        resp = requests.get(ISO_URL,
                            get_params,
                            headers={'accept': 'application/json'})

        try:
            data = resp.json()
        except Exception:
            error_logs.append([i, 'Cannot retrieve JSON Response'])
            continue

        gdf = json_obj_to_geodf(data, 4326)
        gdf['ffid'] = i

        results.append(gdf)

    # Merge all Isochrones
    df_res = merge_df(results)

    out_epsg = get_shp_epsg(facilities)

    if out_epsg != 4326:
        df_res = df_prj(df_res, out_epsg)

    obj_to_shp(df_res, "geometry", out_epsg, output)

    # Write logs
    if len(error_logs):
        with open(
                os.path.join(os.path.dirname(output),
                             fprop(output, 'fn') + '.log.txt'), 'w') as txt:
            for i in error_logs:
                txt.write(("Facility_id: {}\n"
                           "ERROR message:\n"
                           "{}\n"
                           "\n\n\n\n\n\n").format(str(i[0]), i[1]))

    return output
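
A hedged usage sketch; paths, time, date and cutoff values are hypothetical (cutoffSec
values are given in seconds):

# Hypothetical call: 15 and 30 minute service areas around each facility
otp_servarea('health_centers.shp', '08:00', '2021-06-01', [900, 1800], 'service_areas.shp')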
Example 7
File: pop.py Project: jasp382/glass
def pop_within_area(mapunits,
                    mapunits_id,
                    outcol,
                    subunits,
                    subunits_id,
                    pop_col,
                    mapunits_fk,
                    area_shp,
                    output,
                    res_areas=None,
                    res_areas_fk=None):
    """
    Used to calculate % pop exposta a ruidos
    superiores a 65db
    Useful to calculate population a menos de x minutos de um tipo
    de equipamento

    Retuns population % living inside some polygons
    """

    import os
    import pandas as pd
    from glass.g.rd.shp import shp_to_obj
    from glass.g.wt.rst import shpext_to_rst
    from glass.g.wt.shp import obj_to_shp
    from glass.pys.oss import mkdir, fprop
    from glass.g.gp.ovl import grsintersection
    from glass.g.prop.prj import get_epsg
    from glass.g.wenv.grs import run_grass

    # Prepare GRASS GIS Workspace configuration
    oname = fprop(output, 'fn')
    gw = mkdir(os.path.join(os.path.dirname(output), 'ww_' + oname),
               overwrite=True)

    # Boundary to Raster
    w_epsg = get_epsg(area_shp)
    ref_rst = shpext_to_rst(mapunits,
                            os.path.join(gw, 'extent.tif'),
                            cellsize=10,
                            epsg=w_epsg)

    # Create GRASS GIS Session
    loc = 'loc_' + oname
    gbase = run_grass(gw, location=loc, srs=ref_rst)

    import grass.script as grass
    import grass.script.setup as gsetup

    gsetup.init(gbase, gw, loc, 'PERMANENT')

    from glass.g.it.shp import shp_to_grs, grs_to_shp

    # Send data to GRASS GIS
    grs_res = shp_to_grs(
        res_areas if res_areas and res_areas_fk else subunits,
        fprop(res_areas if res_areas and res_areas_fk else subunits, 'fn'),
        asCMD=True)
    grs_ash = shp_to_grs(area_shp, fprop(area_shp, 'fn'), asCMD=True)

    # Run intersection
    int_ = grsintersection(grs_res,
                           grs_ash,
                           f'i_{grs_res}_{grs_ash}',
                           api='grass')

    # Export result
    res_int = grs_to_shp(int_, os.path.join(gw, int_ + '.shp'), 'area')

    # Compute new indicator
    mapunits_df = shp_to_obj(mapunits)
    subunits_df = shp_to_obj(subunits)
    if res_areas and res_areas_fk:
        resareas_df = shp_to_obj(res_areas)
    int______df = shp_to_obj(res_int)

    # For each bgri, get hab area with population
    if res_areas and res_areas_fk:
        resareas_df['gtarea'] = resareas_df.geometry.area

        # Group By
        respop = pd.DataFrame({
            'areav':
            resareas_df.groupby([res_areas_fk])['gtarea'].agg('sum')
        }).reset_index()

        # Join with subunits df
        respop.rename(columns={res_areas_fk: 'jtblfid'}, inplace=True)
        subunits_df = subunits_df.merge(respop,
                                        how='left',
                                        left_on=subunits_id,
                                        right_on='jtblfid')
        subunits_df.drop(['jtblfid'], axis=1, inplace=True)
    else:
        subunits_df['areav'] = subunits_df.geometry.area

    # For each subunit, get area intersecting area_shp
    int______df['gtarea'] = int______df.geometry.area

    int_id = 'a_' + res_areas_fk if res_areas and res_areas_fk else \
        'a_' + subunits_id
    area_int = pd.DataFrame({
        'areai':
        int______df.groupby([int_id])['gtarea'].agg('sum')
    }).reset_index()

    # Join with main subunits df
    area_int.rename(columns={int_id: 'jtblfid'}, inplace=True)

    subunits_df = subunits_df.merge(area_int,
                                    how='left',
                                    left_on=subunits_id,
                                    right_on='jtblfid')
    subunits_df.drop(['jtblfid'], axis=1, inplace=True)

    subunits_df.areai = subunits_df.areai.fillna(0)
    subunits_df.areav = subunits_df.areav.fillna(0)

    subunits_df['pop_af'] = (subunits_df.areai *
                             subunits_df[pop_col]) / subunits_df.areav

    subunits_pop = pd.DataFrame(
        subunits_df.groupby([mapunits_fk]).agg({
            pop_col: 'sum',
            'pop_af': 'sum'
        }))
    subunits_pop.reset_index(inplace=True)

    # Produce final table - mapunits table with new indicator
    subunits_pop.rename(columns={mapunits_fk: 'jtblid'}, inplace=True)

    mapunits_df = mapunits_df.merge(subunits_pop,
                                    how='left',
                                    left_on=mapunits_id,
                                    right_on='jtblid')
    mapunits_df[outcol] = (mapunits_df.pop_af * 100) / mapunits_df[pop_col]

    mapunits_df.drop(['jtblid', pop_col, 'pop_af'], axis=1, inplace=True)

    obj_to_shp(mapunits_df, 'geometry', w_epsg, output)

    return output
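
A hedged usage sketch; all paths and column names are hypothetical:

# Hypothetical call: % of each municipality's population living inside the noise polygons
pop_within_area('municipalities.shp', 'mun_id', 'pop_noise_pct',
                'census_units.shp', 'unit_id', 'pop_res', 'mun_fk',
                'noise_above_65db.shp', 'munis_noise.shp')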
Example 8
File: pop.py Project: jasp382/glass
def shparea_by_mapunitpopulation(polygons,
                                 mapunits,
                                 units_id,
                                 outcol,
                                 output,
                                 units_pop=None,
                                 areacol=None):
    """
    Polygons area by mapunit or by mapunit population
    """

    import os
    import pandas as pd
    from glass.g.wt.rst import shpext_to_rst
    from glass.pys.oss import mkdir, fprop
    from glass.g.gp.ovl import grsintersection
    from glass.g.prop.prj import get_epsg
    from glass.g.wenv.grs import run_grass
    from glass.g.rd.shp import shp_to_obj
    from glass.g.wt.shp import obj_to_shp

    # areacol holds the intersection area; when it is not given, it is
    # temporary and is dropped at the end if units_pop is used
    delareacol = 1 if not areacol else 0
    areacol = outcol if not units_pop else areacol if areacol else 'areav'

    # Prepare GRASS GIS Workspace configuration
    oname = fprop(output, 'fn')

    gw = mkdir(os.path.join(os.path.dirname(output), 'ww_' + oname),
               overwrite=True)

    # Boundary to raster
    w_epsg = get_epsg(mapunits)
    ref_rst = shpext_to_rst(mapunits,
                            os.path.join(gw, 'extent.tif'),
                            cellsize=10,
                            epsg=w_epsg)

    # Sanitize columns
    popunits_df_tmp = shp_to_obj(mapunits)

    drop_cols = [
        c for c in popunits_df_tmp.columns.values
        if c != units_id and c != 'geometry'
    ]
    popunits_df_tmp.drop(drop_cols, axis=1, inplace=True)

    popunits_i = obj_to_shp(popunits_df_tmp, 'geometry', w_epsg,
                            os.path.join(gw, 'popunits.shp'))

    # Create GRASS GIS Session
    _l = 'loc_' + oname

    gbase = run_grass(gw, location=_l, srs=ref_rst)

    import grass.script as grass
    import grass.script.setup as gsetup

    gsetup.init(gbase, gw, _l, 'PERMANENT')

    from glass.g.it.shp import shp_to_grs, grs_to_shp

    # Data to GRASS GIS
    g_popunits = shp_to_grs(popunits_i, fprop(mapunits, 'fn'), asCMD=True)
    g_polygons = shp_to_grs(polygons, fprop(polygons, 'fn'), asCMD=True)

    # Run intersection
    i_shp = grsintersection(g_popunits,
                            g_polygons,
                            f'i_{g_popunits[:5]}_{g_polygons[:5]}',
                            cmd=True)

    # Export result
    i_res = grs_to_shp(i_shp, os.path.join(gw, i_shp + '.shp'), 'area')

    # Open intersection result and mapunits
    mapunits_df = shp_to_obj(mapunits)
    int_df = shp_to_obj(i_res)

    int_df['garea'] = int_df.geometry.area

    int_gp = pd.DataFrame({
        areacol:
        int_df.groupby(['a_' + units_id])['garea'].agg('sum')
    }).reset_index()

    mapunits_df = mapunits_df.merge(int_gp,
                                    how='left',
                                    left_on=units_id,
                                    right_on='a_' + units_id)

    if units_pop:
        mapunits_df[outcol] = mapunits_df[areacol] / mapunits_df[units_pop]

    dc = ['a_' + units_id, areacol
          ] if units_pop and delareacol else ['a_' + units_id]

    mapunits_df.drop(dc, axis=1, inplace=True)

    obj_to_shp(mapunits_df, 'geometry', w_epsg, output)

    return output
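
A hedged usage sketch; all paths and column names are hypothetical:

# Hypothetical call: green-area surface per inhabitant, by municipality
shparea_by_mapunitpopulation('green_areas.shp', 'municipalities.shp', 'mun_id',
                             'green_per_hab', 'munis_green.shp', units_pop='pop_res')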
Example 9
def exp_by_group_relfeat(shp, group_col, relfeat, relfeat_id, reltbl,
                         reltbl_sheet, group_fk, relfeat_fk, out_folder,
                         out_tbl):
    """
    Identify groups in shp, get features related with
    these groups and export group features and related
    features to new file
    """

    import os
    import pandas as pd
    from glass.ng.rd import tbl_to_obj
    from glass.ng.wt import obj_to_tbl
    from glass.g.rd.shp import shp_to_obj
    from glass.g.wt.shp import obj_to_shp
    from glass.g.prop.prj import get_shp_epsg

    epsg = get_shp_epsg(shp)

    # Open data
    shp_df = shp_to_obj(shp)
    rel_df = shp_to_obj(relfeat)

    # Get table with relations N-N
    nn_tbl = tbl_to_obj(reltbl, sheet=reltbl_sheet)

    # Relate relfeat with shp groups
    rel_df = rel_df.merge(nn_tbl,
                          how='inner',
                          left_on=relfeat_id,
                          right_on=relfeat_fk)

    # List Groups
    grp_df = pd.DataFrame({
        'cnttemp':
        shp_df.groupby([group_col])[group_col].agg('count')
    }).reset_index()

    ntbls = []
    # Filter and export
    for idx, row in grp_df.iterrows():
        # Get shp_df filter
        new_shp = shp_df[shp_df[group_col] == row[group_col]]

        # Get relfeat filter
        new_relf = rel_df[rel_df[group_fk] == row[group_col]]

        # Export
        shp_i = obj_to_shp(
            new_shp, 'geometry', epsg,
            os.path.join(out_folder, 'lyr_{}.shp'.format(row[group_col])))
        rel_i = obj_to_shp(
            new_relf, 'geometry', epsg,
            os.path.join(out_folder, 'rel_{}.shp'.format(row[group_col])))

        ntbls.append([row[group_col], shp_i, rel_i])

    ntbls = pd.DataFrame(ntbls, columns=['group_id', 'shp_i', 'rel_i'])

    obj_to_tbl(ntbls, out_tbl)

    return out_tbl
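
A hedged usage sketch; paths, sheet and column names are hypothetical:

# Hypothetical call: one shapefile per group plus one with its related facilities
exp_by_group_relfeat('incidents.shp', 'district', 'hospitals.shp', 'hosp_id',
                     'relations.xlsx', 'Sheet1', 'district_fk', 'hosp_fk',
                     '/tmp/groups', 'groups_map.xlsx')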
Example 10
def cf_based_on_relations(incidents,
                          incidents_id,
                          group_incidents_col,
                          facilities,
                          facilities_id,
                          rel_inc_fac,
                          sheet,
                          group_fk,
                          facilities_fk,
                          output,
                          impedante='TravelTime'):
    """
    Calculate time travel considering specific facilities
    for each group of incidents

    Relations between incidents and facilities are in a auxiliar table (rel_inc_fac).
    Auxiliar table must be a xlsx file
    """

    import os
    import pandas as pd
    from glass.ng.rd import tbl_to_obj
    from glass.g.rd.shp import shp_to_obj
    from glass.g.wt.shp import obj_to_shp
    from glass.g.prop.prj import get_shp_epsg
    from glass.pys.oss import mkdir, fprop
    from glass.g.dp.mge import shps_to_shp

    # Open data
    incidents_df = shp_to_obj(incidents)
    facilities_df = shp_to_obj(facilities)

    rel_df = tbl_to_obj(rel_inc_fac, sheet=sheet)

    # Avoid problems when facilities_id == facilities_fk:
    # rename the fk column in the relation table before merging
    if facilities_id == facilities_fk:
        rel_df.rename(columns={facilities_fk: facilities_fk + '_fk'},
                      inplace=True)
        facilities_fk = facilities_fk + '_fk'

    # Get SRS
    epsg = get_shp_epsg(incidents)

    # Create dir for temporary files
    tmpdir = mkdir(os.path.join(os.path.dirname(output), fprop(output, 'fn')),
                   overwrite=True)

    # Relate facilities with incidents groups
    facilities_df = facilities_df.merge(rel_df,
                                        how='inner',
                                        left_on=facilities_id,
                                        right_on=facilities_fk)

    # List Groups
    grp_df = pd.DataFrame({
        'cnttemp':
        incidents_df.groupby([group_incidents_col
                              ])[group_incidents_col].agg('count')
    }).reset_index()

    # Do the calculations
    res = []
    for idx, row in grp_df.iterrows():
        # Get incidents for that group
        new_i = incidents_df[incidents_df[group_incidents_col] ==
                             row[group_incidents_col]]

        new_i = obj_to_shp(
            new_i, 'geometry', epsg,
            os.path.join(tmpdir, 'i_{}.shp'.format(row[group_incidents_col])))

        # Get facilities for that group
        new_f = facilities_df[facilities_df[group_fk] ==
                              row[group_incidents_col]]
        new_f = obj_to_shp(
            new_f, 'geometry', epsg,
            os.path.join(tmpdir, 'f_{}.shp'.format(row[group_incidents_col])))

        # Calculate closest facility
        # NOTE: closest_facility is not imported inside this function; it is
        # assumed to be available in the enclosing module's scope
        cf = closest_facility(
            new_i, incidents_id, new_f,
            os.path.join(tmpdir, 'cf_{}.shp'.format(row[group_incidents_col])))

        res.append(cf)

    # Produce final result
    shps_to_shp(res, output, api="pandas")

    return output
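
A hedged usage sketch; all arguments are hypothetical, and a closest_facility helper must
be available in the module's scope:

cf_based_on_relations('incidents.shp', 'inc_id', 'district', 'hospitals.shp', 'hosp_id',
                      'relations.xlsx', 'Sheet1', 'district_fk', 'hosp_fk',
                      'closest_by_group.shp')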
Example 11
    # Do it for each reference feature in ref_geom
    for idx, row in ref_df.iterrows():
        e = 1

        # Export Reference Feature to file
        # Reference = level 1
        nutdf = gp.GeoDataFrame(pd.DataFrame(
            [[1, 1, row.geometry, row[idcol]]], columns=cols),
                                crs='EPSG:{}'.format(str(epsg)),
                                geometry="geom")

        # Add level 1 feature to main table
        main_df = pd.concat([main_df, nutdf], ignore_index=True, sort=False)

        nutshp = obj_to_shp(
            nutdf, 'geom', epsg,
            os.path.join(workspace, 'fgrid_{}_1.shp'.format(row[idcol])))

        # Create Reference raster
        rref = shp_to_rst(
            nutshp, None, 10, 0,
            os.path.join(workspace, 'rnut_{}.tif'.format(row[idcol])))

        # Create GRASS GIS Session
        loc_name = 'loc' + row[idcol]
        gbase = run_grass(workspace, location=loc_name, srs=rref)

        import grass.script.setup as gsetup

        gsetup.init(gbase, workspace, loc_name, 'PERMANENT')