Example No. 1
def get_model_link_sums(fc_polygon, fc_model_links):
    '''For all travel model highway links whose centers fall within a polygon (e.g., a buffer
    around a project line, a community type, or a trip shed), sum the values for user-specified
    metrics, e.g., daily VMT for all intersecting model links, total lane-miles on intersecting
    links, etc.'''

    sufx = int(time.clock()) + 1  # timestamp-based suffix keeps in-memory layer names unique
    fl_polygon = os.path.join('memory', 'fl_polygon{}'.format(sufx))
    fl_model_links = os.path.join('memory', 'fl_model_links{}'.format(sufx))

    if arcpy.Exists(fl_polygon): arcpy.Delete_management(fl_polygon)
    arcpy.MakeFeatureLayer_management(fc_polygon, fl_polygon)

    if arcpy.Exists(fl_model_links): arcpy.Delete_management(fl_model_links)
    arcpy.MakeFeatureLayer_management(fc_model_links, fl_model_links)

    # select model links whose centroid is within the polygon area
    arcpy.SelectLayerByLocation_management(fl_model_links,
                                           "HAVE_THEIR_CENTER_IN", fl_polygon)

    link_data_cols = [
        params.col_capclass, params.col_distance, params.col_lanemi,
        params.col_dayvmt
    ]
    output_data_cols = [params.col_dayvmt, params.col_distance]

    # load model links, selected to be near project, into a dataframe
    df_linkdata = utils.esri_object_to_df(fl_model_links, link_data_cols)

    # get total VMT for links within the area
    out_dict = {col: df_linkdata[col].sum() for col in output_data_cols}
    return out_dict
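
The utils.esri_object_to_df helper these examples rely on is never shown. Below is a minimal sketch of what the call sites imply (field list in, one dataframe row per feature out); this is an inference, not the project's actual implementation:

import arcpy
import pandas as pd

def esri_object_to_df(in_esri_obj, esri_obj_fields):
    '''Sketch: read the given fields (including tokens like SHAPE@LENGTH)
    from an ESRI layer or feature class into a pandas dataframe.'''
    rows = []
    with arcpy.da.SearchCursor(in_esri_obj, esri_obj_fields) as cur:
        for row in cur:
            rows.append(row)
    return pd.DataFrame(rows, columns=esri_obj_fields)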
Example No. 2
def get_acc_data(fc_project, fc_accdata, project_type, get_ej=False):
    '''Calculate average accessibility to selected destination types for all
    polygons that either intersect the project line or are within a community type polygon.
    Average accessibility is weighted by each polygon's population.'''

    arcpy.AddMessage("Calculating accessibility metrics...")

    sufx = int(time.clock()) + 1
    fl_accdata = os.path.join('memory', 'fl_accdata{}'.format(sufx))
    fl_project = g_ESRI_variable_2  # g_ESRI_variable_2: module-level string constant (layer name) defined elsewhere in the script

    if arcpy.Exists(fl_project): arcpy.Delete_management(fl_project)
    arcpy.MakeFeatureLayer_management(fc_project, fl_project)

    if arcpy.Exists(fl_accdata): arcpy.Delete_management(fl_accdata)
    arcpy.MakeFeatureLayer_management(fc_accdata, fl_accdata)

    # select polygons that intersect with the project line
    searchdist = 0 if project_type == params.ptype_area_agg else params.bg_search_dist
    arcpy.SelectLayerByLocation_management(fl_accdata, "INTERSECT", fl_project,
                                           searchdist, "NEW_SELECTION")

    # read accessibility data from selected polygons into a dataframe
    accdata_fields = [params.col_geoid, params.col_acc_ej_ind, params.col_pop
                      ] + params.acc_cols_ej
    accdata_df = utils.esri_object_to_df(fl_accdata, accdata_fields)

    # get pop-weighted accessibility values for all accessibility columns

    out_dict = {}
    if get_ej:  # for environmental justice (EJ) populations, weight by the population of EJ polygons only.
        for col in params.acc_cols_ej:
            col_wtd = "{}_wtd".format(col)
            col_ej_pop = "{}_EJ".format(params.col_pop)
            accdata_df[col_wtd] = accdata_df[col] * accdata_df[
                params.col_pop] * accdata_df[params.col_acc_ej_ind]
            accdata_df[col_ej_pop] = accdata_df[params.col_pop] * accdata_df[
                params.col_acc_ej_ind]

            tot_ej_pop = accdata_df[col_ej_pop].sum()

            out_wtd_acc = accdata_df[col_wtd].sum(
            ) / tot_ej_pop if tot_ej_pop > 0 else 0
            col_out_ej = "{}_EJ".format(col)
            out_dict[col_out_ej] = out_wtd_acc
    else:
        total_pop = accdata_df[params.col_pop].sum()
        for col in params.acc_cols:
            if total_pop <= 0:  # if no one lives near project, get unweighted avg accessibility of block groups near project
                out_wtd_acc = accdata_df[col].mean()
            else:
                col_wtd = "{}_wtd".format(col)
                accdata_df[col_wtd] = accdata_df[col] * accdata_df[
                    params.col_pop]
                out_wtd_acc = accdata_df[col_wtd].sum() / total_pop

            out_dict[col] = out_wtd_acc

    return out_dict
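
To make the weighting concrete, here is a toy illustration of the population-weighted average computed above (column names and values are made up):

import pandas as pd

# three block groups with accessibility scores and populations (made-up values)
df = pd.DataFrame({'acc_jobs': [10.0, 40.0, 20.0], 'POP': [100, 300, 0]})
wtd_avg = (df['acc_jobs'] * df['POP']).sum() / df['POP'].sum()
print(wtd_avg)  # (10*100 + 40*300 + 20*0) / 400 = 32.5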
Example No. 3
def get_acc_data(fc_project, fc_accdata, project_type, get_ej=False):
    arcpy.AddMessage("calculating accessibility metrics...")

    out_dict = {}
    try:
        fl_accdata = "fl_accdata"
        fl_project = "fl_project"

        utils.make_fl_conditional(fc_project, fl_project)
        utils.make_fl_conditional(fc_accdata, fl_accdata)

        # select polygons that intersect with the project line
        searchdist = 0 if project_type == p.ptype_area_agg else p.bg_search_dist
        arcpy.SelectLayerByLocation_management(fl_accdata, "INTERSECT",
                                               fl_project, searchdist,
                                               "NEW_SELECTION")

        # read accessibility data from selected polygons into a dataframe
        accdata_fields = [p.col_geoid, p.col_acc_ej_ind, p.col_pop
                          ] + p.acc_cols_ej
        accdata_df = utils.esri_object_to_df(fl_accdata, accdata_fields)

        # get pop-weighted accessibility values for all accessibility columns
        if get_ej:
            for col in p.acc_cols_ej:
                col_wtd = "{}_wtd".format(col)
                col_ej_pop = "{}_EJ".format(p.col_pop)
                accdata_df[col_wtd] = accdata_df[col] * accdata_df[
                    p.col_pop] * accdata_df[p.col_acc_ej_ind]
                accdata_df[col_ej_pop] = accdata_df[p.col_pop] * accdata_df[
                    p.col_acc_ej_ind]

                tot_ej_pop = accdata_df[col_ej_pop].sum()

                out_wtd_acc = accdata_df[col_wtd].sum(
                ) / tot_ej_pop if tot_ej_pop > 0 else 0
                col_out_ej = "{}_EJ".format(col)
                out_dict[col_out_ej] = out_wtd_acc
        else:
            for col in p.acc_cols:
                col_wtd = "{}_wtd".format(col)
                accdata_df[col_wtd] = accdata_df[col] * accdata_df[p.col_pop]
                out_wtd_acc = accdata_df[col_wtd].sum() / accdata_df[
                    p.col_pop].sum()
                out_dict[col] = out_wtd_acc
    except Exception:
        msg = "{} {}".format(arcpy.GetMessages(2), trace())
        arcpy.AddMessage(msg)

    return out_dict
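
The utils.make_fl_conditional helper is also not shown, but Examples No. 1 and 2 inline what is evidently the same pattern, so it presumably wraps it:

import arcpy

def make_fl_conditional(fc, fl):
    '''Sketch based on the inline pattern in Examples No. 1 and 2: delete the
    feature layer if it already exists, then (re)create it from the feature class.'''
    if arcpy.Exists(fl):
        arcpy.Delete_management(fl)
    arcpy.MakeFeatureLayer_management(fc, fl)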
Example No. 4
def get_proj_ctype(in_project_fc, commtypes_fc):
    '''Get project community type, based on which community type has most spatial overlap with project'''
    
    temp_intersect_fc = r'memory/temp_intersect_fc'
    arcpy.Intersect_analysis([in_project_fc, commtypes_fc], temp_intersect_fc, "ALL", 
                             0, "LINE")
    
    len_field = 'SHAPE@LENGTH'
    fields = ['OBJECTID', len_field, p.col_ctype]
    df_intersect = utils.esri_object_to_df(temp_intersect_fc, fields)
    
    # community type of the intersect piece with the greatest overlap length
    proj_ctype = df_intersect.loc[df_intersect[len_field].idxmax(), p.col_ctype]
    
    return proj_ctype
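
A toy illustration of the max-overlap pick ('comm_type' is a made-up stand-in for p.col_ctype):

import pandas as pd

df = pd.DataFrame({'SHAPE@LENGTH': [120.0, 450.0, 90.0],
                   'comm_type': ['Urban', 'Suburban', 'Rural']})
print(df.loc[df['SHAPE@LENGTH'].idxmax(), 'comm_type'])  # 'Suburban'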
Example No. 5
def make_summary_df(in_fl, input_cols, landuse_cols, col_hh, park_calc_dict):

    # load into dataframe
    parcel_df = utils.esri_object_to_df(in_fl, input_cols)

    col_parkac = park_calc_dict['park_acres_field']
    col_lutype = park_calc_dict['lutype_field']
    lutype_parks = park_calc_dict['park_lutype']
    col_area_ac = park_calc_dict['area_field']

    # add park-acres column: set to total parcel acres where the parcel's land use type is the parks/open space type
    parcel_df.loc[(parcel_df[col_lutype] == lutype_parks),
                  col_parkac] = parcel_df[col_area_ac]

    cols = landuse_cols + [col_hh]
    out_df = pd.DataFrame(parcel_df[cols].sum(axis=0)).T

    return out_df
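
The conditional .loc assignment only fills park acres on parcels whose land-use type is the park type; a toy illustration (column names are made up):

import pandas as pd

df = pd.DataFrame({'LUTYPE': ['Park', 'Resid', 'Park'],
                   'GISAc': [2.0, 0.25, 5.0]})
df.loc[df['LUTYPE'] == 'Park', 'PARK_AC'] = df['GISAc']
print(df['PARK_AC'].tolist())  # [2.0, nan, 5.0]; non-park parcels stay NaN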
Example No. 6
def get_model_link_sums(fc_polygon, fc_model_links):

    fl_polygon = "fl_polygon"
    fl_model_links = "fl_model_links"
    utils.make_fl_conditional(fc_polygon, fl_polygon)
    utils.make_fl_conditional(fc_model_links, fl_model_links)

    # select model links whose centroid is within the polygon area
    arcpy.SelectLayerByLocation_management(fl_model_links, "HAVE_THEIR_CENTER_IN", fl_polygon)

    link_data_cols = [p.col_capclass, p.col_distance, p.col_lanemi, p.col_dayvmt]
    output_data_cols = [p.col_dayvmt, p.col_distance]

    # load model links, selected to be near project, into a dataframe
    df_linkdata = utils.esri_object_to_df(fl_model_links, link_data_cols)

    # get total VMT for links within the area
    out_dict = {col: df_linkdata[col].sum() for col in output_data_cols}
    return out_dict
Example No. 7
def get_linkoccup_data(fc_project, project_type, fc_model_links):
    arcpy.AddMessage("Getting modeled vehicle occupancy data...")
    fl_project = g_ESRI_variable_1  # g_ESRI_variable_*: module-level string constants (layer names) defined elsewhere in the script
    fl_model_links = g_ESRI_variable_2

    arcpy.MakeFeatureLayer_management(fc_project, fl_project)
    arcpy.MakeFeatureLayer_management(fc_model_links, fl_model_links)

    # get model links of the specified link type whose centroids are within the search distance of the project
    arcpy.SelectLayerByLocation_management(fl_model_links,
                                           'HAVE_THEIR_CENTER_IN', fl_project,
                                           params.modlink_searchdist)

    # load data into dataframe then subselect only ones that are on same road type as project (e.g. fwy vs. arterial)
    df_cols = [
        params.col_capclass, params.col_lanemi, params.col_tranvol,
        params.col_dayvehvol, params.col_sovvol, params.col_hov2vol,
        params.col_hov3vol, params.col_daycommvehvol
    ]
    df_linkdata = utils.esri_object_to_df(fl_model_links, df_cols)

    if project_type == params.ptype_fwy:
        df_linkdata = df_linkdata.loc[df_linkdata[params.col_capclass].isin(
            params.capclasses_fwy)]
    else:
        df_linkdata = df_linkdata.loc[df_linkdata[params.col_capclass].isin(
            params.capclass_arterials)]

    df_trnlinkdata = df_linkdata.loc[pd.notnull(
        df_linkdata[params.col_tranvol])]
    avg_proj_trantrips = get_wtdavg_vehvol(
        df_trnlinkdata,
        params.col_tranvol) if df_trnlinkdata.shape[0] > 0 else 0
    avg_proj_vehocc = get_wtdavg_vehocc(
        df_linkdata) if df_linkdata.shape[0] > 0 else 0

    out_dict = {
        "avg_2way_trantrips": avg_proj_trantrips,
        "avg_2way_vehocc": avg_proj_vehocc
    }

    return out_dict
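
The get_wtdavg_vehvol and get_wtdavg_vehocc helpers are not shown. Here is a sketch of a plausible lane-mile-weighted volume average consistent with the columns loaded above; the weighting choice (and any occupancy factors inside get_wtdavg_vehocc) are assumptions, not the tool's actual method:

def get_wtdavg_vehvol(df_linkdata, col_vehvol):
    '''Hypothetical sketch: average link volume, weighted by lane-miles.'''
    wts = df_linkdata[params.col_lanemi]
    return (df_linkdata[col_vehvol] * wts).sum() / wts.sum() if wts.sum() > 0 else 0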
Example No. 8
def conflate_tmc2projline(fl_proj, dirxn_list, tmc_dir_field, fl_tmcs_buffd,
                          speed_data_fields):

    out_row_dict = {}

    # get length of project
    fld_shp_len = "SHAPE@LENGTH"
    fld_totprojlen = "proj_length_ft"

    with arcpy.da.SearchCursor(fl_proj, fld_shp_len) as cur:
        for row in cur:
            out_row_dict[fld_totprojlen] = row[0]

    for direcn in dirxn_list:
        # https://support.esri.com/en/technical-article/000012699

        # temporary files
        temp_intersctpts = "temp_intersectpoints"
        temp_intrsctpt_singlpt = "temp_intrsctpt_singlpt"  # converted from multipoint to single point (1 pt per feature)
        temp_splitprojlines = "temp_splitprojlines"  # fc of project line split up to match TMC buffer extents
        temp_splitproj_w_tmcdata = "temp_splitproj_w_tmcdata"  # fc of split project lines with TMC data on them

        fl_splitprojlines = "fl_splitprojlines"
        fl_splitproj_w_tmcdata = "fl_splitproj_w_tmcdata"

        # get TMCs whose buffers intersect the project line
        arcpy.SelectLayerByLocation_management(fl_tmcs_buffd, "INTERSECT",
                                               fl_proj)

        # select TMCs that intersect the project and are in indicated direction
        sql_sel_tmcxdir = "{} = '{}'".format(tmc_dir_field, direcn)
        arcpy.SelectLayerByAttribute_management(fl_tmcs_buffd,
                                                "SUBSET_SELECTION",
                                                sql_sel_tmcxdir)

        # split the project line at the boundaries of the TMC buffer, creating points where project line intersects TMC buffer boundaries
        arcpy.Intersect_analysis([fl_proj, fl_tmcs_buffd], temp_intersctpts,
                                 "", "", "POINT")
        arcpy.MultipartToSinglepart_management(temp_intersctpts,
                                               temp_intrsctpt_singlpt)

        # split project line into pieces at points where it intersects buffer, with 10ft tolerance
        # (not sure why 10ft tolerance needed but it is, zero tolerance results in some not splitting)
        arcpy.SplitLineAtPoint_management(fl_proj, temp_intrsctpt_singlpt,
                                          temp_splitprojlines, "10 Feet")
        arcpy.MakeFeatureLayer_management(temp_splitprojlines,
                                          fl_splitprojlines)

        # get TMC speeds onto each piece of the split project line via spatial join
        arcpy.SpatialJoin_analysis(temp_splitprojlines, fl_tmcs_buffd,
                                   temp_splitproj_w_tmcdata, "JOIN_ONE_TO_ONE",
                                   "KEEP_ALL", "#", "HAVE_THEIR_CENTER_IN",
                                   "30 Feet")

        # convert to fl and select records where "check field" col val is not none
        arcpy.MakeFeatureLayer_management(temp_splitproj_w_tmcdata,
                                          fl_splitproj_w_tmcdata)

        check_field = speed_data_fields[
            0]  # choose first speed value field for checking--if it's null, then don't include those rows in aggregation
        sql_notnull = "{} IS NOT NULL".format(check_field)
        arcpy.SelectLayerByAttribute_management(fl_splitproj_w_tmcdata,
                                                "NEW_SELECTION", sql_notnull)

        # convert the selected records into a numpy array then a pandas dataframe
        flds_df = [fld_shp_len] + speed_data_fields
        df_spddata = utils.esri_object_to_df(fl_splitproj_w_tmcdata, flds_df)

        # remove project pieces with no speed data so their distance isn't included in weighting
        df_spddata = df_spddata.loc[pd.notnull(
            df_spddata[speed_data_fields[0]])].astype(float)

        # sum of lengths of project segments that intersect TMCs in the specified direction
        dir_len = df_spddata[fld_shp_len].sum()
        # "calc" length because it may not be the same as the project length
        out_row_dict["{}_calc_len".format(direcn)] = dir_len

        # get distance-weighted average value for each speed/congestion field.
        # For PHED or hours of delay, we want a dist-weighted SUM; for speed/reliability, a dist-weighted AVG.
        # Ideally this would be a dict of {<field>: <aggregation method>}.
        for field in speed_data_fields:
            fielddir = "{}{}".format(direcn,
                                     field)  # add direction tag to field names
            # if there's speed data, get weighted average value.
            linklen_w_speed_data = df_spddata[fld_shp_len].sum()
            if linklen_w_speed_data > 0:  #wgtd avg = sum(piece's data * piece's len)/(sum of all piece lengths)
                avg_data_val = (df_spddata[field]*df_spddata[fld_shp_len]).sum() \
                                / df_spddata[fld_shp_len].sum()

                out_row_dict[fielddir] = avg_data_val
            else:
                # if no length, just return mean speed? Maybe instead return 'no data available'? Or -1 to keep as int?
                out_row_dict[fielddir] = df_spddata[field].mean()
                continue

    #cleanup
    fcs_to_delete = [
        temp_intersctpts, temp_intrsctpt_singlpt, temp_splitprojlines,
        temp_splitproj_w_tmcdata
    ]
    for fc in fcs_to_delete:
        arcpy.Delete_management(fc)
    return pd.DataFrame([out_row_dict])
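
Numerically, the distance-weighted average works like this (toy segments; 'SPD_AM' is a made-up speed field):

import pandas as pd

seg = pd.DataFrame({'SHAPE@LENGTH': [1000.0, 3000.0],  # feet
                    'SPD_AM': [60.0, 40.0]})           # mph
wtd = (seg['SPD_AM'] * seg['SHAPE@LENGTH']).sum() / seg['SHAPE@LENGTH'].sum()
print(wtd)  # (60*1000 + 40*3000) / 4000 = 45.0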
Example No. 9
def get_collision_data(fc_project, project_type, fc_colln_pts, project_adt):

    arcpy.AddMessage("Aggregating collision data...")
    fc_model_links = p.model_links_fc()
    
    fl_project = 'proj_fl'
    fl_colln_pts = 'collision_fl'

    utils.make_fl_conditional(fc_project, fl_project)
    utils.make_fl_conditional(fc_colln_pts, fl_colln_pts)

    # if for project segment, get annual VMT for project segment based on user input and segment length
    df_projlen = utils.esri_object_to_df(fl_project, ["SHAPE@LENGTH"])
    proj_len_mi = df_projlen.iloc[0][0] / p.ft2mile  # return project length in miles

    # for aggregate, polygon-based avgs (e.g., community type, whole region), use the model for VMT; for a
    # project, the VMT is based on a combo of project length and user-entered ADT for the project.
    # Annual project VMT is approximated as daily VMT * 320, treating ADT as reflective of weekdays only.
    if project_type == p.ptype_area_agg:
        vmt_dict = get_model_link_sums(fc_project, fc_model_links)
        dayvmt = vmt_dict[p.col_dayvmt]
        ann_proj_vmt = dayvmt * 320
        proj_len_mi = get_centerline_miles(fc_project, p.reg_centerline_fc)
    else:
        ann_proj_vmt = project_adt * proj_len_mi * 320

    # get collision totals
    searchdist = 0 if project_type == p.ptype_area_agg else p.colln_searchdist
    arcpy.SelectLayerByLocation_management(fl_colln_pts, 'WITHIN_A_DISTANCE', fl_project, searchdist)
    colln_cols = [p.col_fwytag, p.col_nkilled, p.col_bike_ind, p.col_ped_ind]

    df_collndata = utils.esri_object_to_df(fl_colln_pts, colln_cols)

    # filter so that fwy collisions don't get tagged to non-freeway projects, and vice-versa
    if project_type == p.ptype_fwy:
        df_collndata = df_collndata.loc[df_collndata[p.col_fwytag] == 1]
    elif project_type == p.ptype_area_agg:
        pass  # for aggregating at polygon level, like region or community type, we want all collisions on all roads
    else:
        df_collndata = df_collndata.loc[df_collndata[p.col_fwytag] == 0]

    total_collns = df_collndata.shape[0]
    fatal_collns = df_collndata.loc[df_collndata[p.col_nkilled] > 0].shape[0]
    bikeped_collns = df_collndata.loc[(df_collndata[p.col_bike_ind] == p.ind_val_true)
                                      | (df_collndata[p.col_ped_ind] == p.ind_val_true)].shape[0]
    pct_bikeped_collns = bikeped_collns / total_collns if total_collns > 0 else 0

    bikeped_colln_clmile = bikeped_collns / proj_len_mi

    # collisions per 100 million VMT (100MVMT) = avg annual collisions / (modeled daily VMT * 320 days) * 100,000,000
    avg_ann_collisions = total_collns / p.years_of_collndata
    avg_ann_fatalcolln = fatal_collns / p.years_of_collndata

    colln_rate_per_vmt = avg_ann_collisions / ann_proj_vmt * 100000000 if ann_proj_vmt > 0 else 0
    fatalcolln_per_vmt = avg_ann_fatalcolln / ann_proj_vmt * 100000000 if ann_proj_vmt > 0 else 0
    pct_fatal_collns = avg_ann_fatalcolln / avg_ann_collisions if avg_ann_collisions > 0 else 0

    out_dict = {"TOT_COLLISNS": total_collns, "TOT_COLLISNS_PER_100MVMT": colln_rate_per_vmt,
                "FATAL_COLLISNS": fatal_collns, "FATAL_COLLISNS_PER_100MVMT": fatalcolln_per_vmt,
                "PCT_FATAL_COLLISNS": pct_fatal_collns, "BIKEPED_COLLISNS": bikeped_collns, 
                "BIKEPED_COLLISNS_PER_CLMILE": bikeped_colln_clmile, "PCT_BIKEPED_COLLISNS": pct_bikeped_collns}

    return out_dict
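
A worked example of the rate math, with made-up numbers:

# 50 collisions over 5 years of data on a segment with 10M annual VMT
avg_ann_collisions = 50 / 5                                    # 10.0 per year
rate_per_100mvmt = avg_ann_collisions / 10_000_000 * 100_000_000
print(rate_per_100mvmt)  # 100.0 collisions per 100M VMT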
Example No. 10
def conflate_tmc2projline(fl_proj, dirxn_list, tmc_dir_field, fl_tmcs_buffd,
                          fields_calc_dict):

    speed_data_fields = [k for k, v in fields_calc_dict.items()]
    out_row_dict = {}

    # get length of project
    fld_shp_len = "SHAPE@LENGTH"
    fld_totprojlen = "proj_length_ft"

    with arcpy.da.SearchCursor(fl_proj, fld_shp_len) as cur:
        for row in cur:
            out_row_dict[fld_totprojlen] = row[0]

    for direcn in dirxn_list:
        # https://support.esri.com/en/technical-article/000012699

        # temporary files
        scratch_gdb = arcpy.env.scratchGDB

        temp_intersctpts = os.path.join(
            scratch_gdb, "temp_intersectpoints"
        )  # r"{}\temp_intersectpoints".format(scratch_gdb)
        temp_intrsctpt_singlpt = os.path.join(
            scratch_gdb, "temp_intrsctpt_singlpt"
        )  # converted from multipoint to single point (1 pt per feature)
        temp_splitprojlines = os.path.join(
            scratch_gdb, "temp_splitprojlines"
        )  # fc of project line split up to match TMC buffer extents
        temp_splitproj_w_tmcdata = os.path.join(
            scratch_gdb, "temp_splitproj_w_tmcdata"
        )  # fc of split project lines with TMC data on them

        fl_splitprojlines = g_ESRI_variable_1  # g_ESRI_variable_*: module-level string constants defined elsewhere in the script
        fl_splitproj_w_tmcdata = g_ESRI_variable_2

        # get TMCs whose buffers intersect the project line
        arcpy.SelectLayerByLocation_management(fl_tmcs_buffd, "INTERSECT",
                                               fl_proj)

        # select TMCs that intersect the project and are in indicated direction
        sql_sel_tmcxdir = g_ESRI_variable_3.format(tmc_dir_field, direcn)  # SQL template, e.g. "{} = '{}'" as in Example No. 8
        arcpy.SelectLayerByAttribute_management(fl_tmcs_buffd,
                                                "SUBSET_SELECTION",
                                                sql_sel_tmcxdir)

        # split the project line at the boundaries of the TMC buffer, creating points where project line intersects TMC buffer boundaries
        arcpy.Intersect_analysis([fl_proj, fl_tmcs_buffd], temp_intersctpts,
                                 "", "", "POINT")
        arcpy.MultipartToSinglepart_management(temp_intersctpts,
                                               temp_intrsctpt_singlpt)

        # split project line into pieces at points where it intersects buffer, with 10ft tolerance
        # (not sure why 10ft tolerance needed but it is, zero tolerance results in some not splitting)
        arcpy.SplitLineAtPoint_management(fl_proj, temp_intrsctpt_singlpt,
                                          temp_splitprojlines, "10 Feet")
        arcpy.MakeFeatureLayer_management(temp_splitprojlines,
                                          fl_splitprojlines)

        # get TMC speeds onto each piece of the split project line via spatial join
        arcpy.SpatialJoin_analysis(temp_splitprojlines, fl_tmcs_buffd,
                                   temp_splitproj_w_tmcdata, "JOIN_ONE_TO_ONE",
                                   "KEEP_ALL", "#", "HAVE_THEIR_CENTER_IN",
                                   "30 Feet")

        # convert to fl and select records where "check field" col val is not none
        arcpy.MakeFeatureLayer_management(temp_splitproj_w_tmcdata,
                                          fl_splitproj_w_tmcdata)

        check_field = speed_data_fields[
            0]  # choose first speed value field for checking--if it's null, then don't include those rows in aggregation
        sql_notnull = g_ESRI_variable_4.format(check_field)  # SQL template, e.g. "{} IS NOT NULL" as in Example No. 8
        arcpy.SelectLayerByAttribute_management(fl_splitproj_w_tmcdata,
                                                "NEW_SELECTION", sql_notnull)

        # convert the selected records into a numpy array then a pandas dataframe
        flds_df = [fld_shp_len] + speed_data_fields
        df_spddata = utils.esri_object_to_df(fl_splitproj_w_tmcdata, flds_df)

        # remove project pieces with no speed data so their distance isn't included in weighting
        df_spddata = df_spddata.loc[pd.notnull(
            df_spddata[speed_data_fields[0]])].astype(float)

        # remove rows where there wasn't enough NPMRDS data to get a valid speed or reliability reading
        df_spddata = df_spddata.loc[df_spddata[flds_df].min(axis=1) > 0]

        # sum of lengths of project segments that intersect TMCs in the specified direction
        dir_len = df_spddata[fld_shp_len].sum()
        # "calc" length because it may not be the same as the project length
        out_row_dict["{}_calc_len".format(direcn)] = dir_len

        # go through and do conflation calculation for each TMC-based data field based on correct method of aggregation
        for field, calcmthd in fields_calc_dict.items():
            if calcmthd == params.calc_inv_avg:  # see PPA documentation on how the "inverted speed average" method is calculated
                sd_dict = get_wtd_speed(df_spddata, field, direcn, fld_shp_len)
                out_row_dict.update(sd_dict)
            elif calcmthd == params.calc_distwt_avg:
                fielddir = "{}{}".format(
                    direcn, field)  # add direction tag to field names
                # if there's speed data, get weighted average value.
                linklen_w_speed_data = df_spddata[fld_shp_len].sum()
                if linklen_w_speed_data > 0:  #wgtd avg = sum(piece's data * piece's len)/(sum of all piece lengths)
                    avg_data_val = (df_spddata[field]*df_spddata[fld_shp_len]).sum() \
                                    / df_spddata[fld_shp_len].sum()

                    out_row_dict[fielddir] = avg_data_val
                else:
                    # if no length, just return mean speed? Maybe instead return 'no data available'? Or -1 to keep as int?
                    out_row_dict[fielddir] = df_spddata[field].mean()
                    continue
            else:
                continue

    #cleanup
    fcs_to_delete = [
        temp_intersctpts, temp_intrsctpt_singlpt, temp_splitprojlines,
        temp_splitproj_w_tmcdata
    ]
    for fc in fcs_to_delete:
        arcpy.Delete_management(fc)
    return pd.DataFrame([out_row_dict])
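
The get_wtd_speed helper (the params.calc_inv_avg branch) is not shown. Below is a sketch of one plausible "inverted speed average", i.e., a distance-weighted harmonic mean (the standard space-mean speed); the actual PPA method may differ:

def get_wtd_speed(df_spddata, field, direcn, fld_shp_len):
    '''Hypothetical sketch: space-mean speed = total length / sum(length / speed).'''
    df_valid = df_spddata.loc[df_spddata[field] > 0]
    inv_sum = (df_valid[fld_shp_len] / df_valid[field]).sum()
    wtd_spd = df_valid[fld_shp_len].sum() / inv_sum if inv_sum > 0 else 0
    return {"{}{}".format(direcn, field): wtd_spd}

The returned dict matches the out_row_dict.update(sd_dict) call above.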
Example No. 11
def get_collision_data(fc_project, project_type, fc_colln_pts, project_adt):
    '''Inputs:
        fc_project = project line around which a buffer will be drawn for selecting collision locations
        project_type = whether it's a freeway project, an arterial project, etc., or a
        community design project.

        With user-entered ADT (avg daily traffic) and a point layer of collision locations, calculates
        several key safety metrics, including total collisions, collisions per 100M VMT, percent
        bike/ped collisions, etc.'''

    arcpy.AddMessage("Aggregating collision data...")

    fc_model_links = params.model_links_fc()

    sufx = int(time.clock()) + 1
    fl_project = g_ESRI_variable_5  # module-level string constant (layer name) defined elsewhere in the script
    fl_colln_pts = os.path.join('memory', 'fl_colln_pts{}'.format(sufx))

    if arcpy.Exists(fl_project): arcpy.Delete_management(fl_project)
    arcpy.MakeFeatureLayer_management(fc_project, fl_project)

    if arcpy.Exists(fl_colln_pts): arcpy.Delete_management(fl_colln_pts)
    arcpy.MakeFeatureLayer_management(fc_colln_pts, fl_colln_pts)

    # if for project segment, get annual VMT for project segment based on user input and segment length
    df_projlen = utils.esri_object_to_df(fl_project, ["SHAPE@LENGTH"])
    proj_len_mi = df_projlen.iloc[0][
        0] / params.ft2mile  # return project length in miles

    # for aggregate, polygon-based avgs (e.g., community type, whole region), use the model for VMT; for a
    # project, the VMT is based on a combo of project length and user-entered ADT for the project.
    # Annual project VMT is approximated as daily VMT * 320, treating ADT as reflective of weekdays only.
    if project_type == params.ptype_area_agg:
        vmt_dict = get_model_link_sums(fc_project, fc_model_links)
        dayvmt = vmt_dict[params.col_dayvmt]
        ann_proj_vmt = dayvmt * 320
        proj_len_mi = get_centerline_miles(
            fc_project, params.reg_artcollcline_fc
        )  # only gets for collector streets and above
    else:
        ann_proj_vmt = project_adt * proj_len_mi * 320

    # get collision totals
    searchdist = 0 if project_type == params.ptype_area_agg else params.colln_searchdist
    arcpy.SelectLayerByLocation_management(fl_colln_pts, 'WITHIN_A_DISTANCE',
                                           fl_project, searchdist)
    colln_cols = [
        params.col_fwytag, params.col_nkilled, params.col_bike_ind,
        params.col_ped_ind
    ]

    df_collndata = utils.esri_object_to_df(fl_colln_pts, colln_cols)

    # filter so that fwy collisions don't get tagged to non-freeway projects, and vice-versa
    if project_type == params.ptype_fwy:
        df_collndata = df_collndata.loc[df_collndata[params.col_fwytag] == 1]
    elif project_type == params.ptype_area_agg:
        pass  # for aggregating at polygon level, like region or community type, we want all collisions on all roads
    else:
        df_collndata = df_collndata.loc[df_collndata[params.col_fwytag] == 0]

    total_collns = df_collndata.shape[0]
    fatal_collns = df_collndata.loc[
        df_collndata[params.col_nkilled] > 0].shape[0]
    bikeped_collns = df_collndata.loc[
        (df_collndata[params.col_bike_ind] == params.ind_val_true)
        | (df_collndata[params.col_ped_ind] == params.ind_val_true)].shape[0]
    pct_bikeped_collns = bikeped_collns / total_collns if total_collns > 0 else 0

    bikeped_colln_clmile = bikeped_collns / proj_len_mi

    # collisions per 100 million VMT (100MVMT) = avg annual collisions / (modeled daily VMT * 320 days) * 100,000,000
    avg_ann_collisions = total_collns / params.years_of_collndata
    avg_ann_fatalcolln = fatal_collns / params.years_of_collndata

    colln_rate_per_vmt = avg_ann_collisions / ann_proj_vmt * 100000000 if ann_proj_vmt > 0 else -1
    fatalcolln_per_vmt = avg_ann_fatalcolln / ann_proj_vmt * 100000000 if ann_proj_vmt > 0 else -1
    pct_fatal_collns = avg_ann_fatalcolln / avg_ann_collisions if avg_ann_collisions > 0 else 0

    out_dict = {
        "TOT_COLLISNS": total_collns,
        "TOT_COLLISNS_PER_100MVMT": colln_rate_per_vmt,
        "FATAL_COLLISNS": fatal_collns,
        "FATAL_COLLISNS_PER_100MVMT": fatalcolln_per_vmt,
        "PCT_FATAL_COLLISNS": pct_fatal_collns,
        "BIKEPED_COLLISNS": bikeped_collns,
        "BIKEPED_COLLISNS_PER_CLMILE": bikeped_colln_clmile,
        "PCT_BIKEPED_COLLISNS": pct_bikeped_collns
    }

    return out_dict
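
A minimal usage sketch; the paths, the ptype_arterial constant, and the ADT value are all hypothetical:

# hypothetical inputs; real feature classes and constants come from the PPA project setup
fc_project = r'C:\data\ppa.gdb\project_line'
fc_colln_pts = r'C:\data\ppa.gdb\collision_points'
result = get_collision_data(fc_project, params.ptype_arterial, fc_colln_pts, project_adt=25000)
print(result['TOT_COLLISNS_PER_100MVMT'])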