def get_poly_avg(input_poly_fc):
    """Compute performance metrics for the polygons in input_poly_fc and
    return them merged into one flat dict.

    As of 11/26/2019, each helper called here returns a dictionary, so the
    results can simply be update()d together at the end.
    """
    accdata = acc.get_acc_data(input_poly_fc, p.accdata_fc, p.ptype_area_agg, get_ej=False)
    collision_data = coll.get_collision_data(input_poly_fc, p.ptype_area_agg, p.collisions_fc, 0)
    mix_data = mixidx.get_mix_idx(p.parcel_pt_fc, input_poly_fc, p.ptype_area_agg)
    intsecn_dens = intsxn.intersection_density(input_poly_fc, p.intersections_base_fc, p.ptype_area_agg)
    bikeway_covg = bufnet.get_bikeway_mileage_share(input_poly_fc, p.ptype_area_agg)
    tran_stop_density = trn_svc.transit_svc_density(input_poly_fc, p.trn_svc_fc, p.ptype_area_agg)

    # industrial-job share of all jobs; guard so a zero job total yields 0
    # instead of raising ZeroDivisionError
    emp_ind_wtot = lubuff.point_sum(p.parcel_pt_fc, input_poly_fc, p.ptype_area_agg,
                                    [p.col_empind, p.col_emptot], 0)
    emp_ind_pct = {'emp_ind_pct': emp_ind_wtot[p.col_empind] / emp_ind_wtot[p.col_emptot]
                   if emp_ind_wtot[p.col_emptot] > 0 else 0}

    # population split by the EJ-community indicator; key 1 = "in EJ area"
    # (assumes p.col_ej_ind is a 0/1 flag — TODO confirm). .get() avoids a
    # KeyError when no EJ-area population falls in the polygon.
    pop_x_ej = lubuff.point_sum(p.parcel_pt_fc, input_poly_fc, p.ptype_area_agg,
                                [p.col_pop_ilut], 0, p.col_ej_ind)
    pop_tot = sum(pop_x_ej.values())
    pct_pop_ej = {'pct_ej_pop': pop_x_ej.get(1, 0) / pop_tot if pop_tot > 0 else 0}

    # combined job + dwelling-unit density per net acre
    job_pop_dens = lubuff.point_sum_density(p.parcel_pt_fc, input_poly_fc, p.ptype_area_agg,
                                            [p.col_du, p.col_emptot], 0)
    total_dens = {"job_du_dens_ac": sum(job_pop_dens.values())}

    out_dict = {}
    for d in [accdata, collision_data, mix_data, intsecn_dens, bikeway_covg,
              tran_stop_density, pct_pop_ej, emp_ind_pct, total_dens]:
        out_dict.update(d)
    return out_dict
def get_poly_avg(input_poly_fc):
    """Compute performance metrics for the polygons in input_poly_fc and
    return them merged into one flat dict.

    As of 11/26/2019, each helper called here returns a dictionary, so the
    results can simply be update()d together at the end.
    """
    pcl_pt_data = params.parcel_pt_fc_yr()
    accdata = acc.get_acc_data(input_poly_fc, params.accdata_fc, params.ptype_area_agg, get_ej=False)
    collision_data = coll.get_collision_data(input_poly_fc, params.ptype_area_agg, params.collisions_fc, 0)
    mix_data = mixidx.get_mix_idx(pcl_pt_data, input_poly_fc, params.ptype_area_agg)
    intsecn_dens = intsxn.intersection_density(input_poly_fc, params.intersections_base_fc, params.ptype_area_agg)
    bikeway_covg = bufnet.get_bikeway_mileage_share(input_poly_fc, params.ptype_area_agg)
    tran_stop_density = trn_svc.transit_svc_density(input_poly_fc, params.trn_svc_fc, params.ptype_area_agg)

    # industrial-job share of all jobs; zero job total -> share of 0
    emp_ind_wtot = lubuff.point_sum(pcl_pt_data, input_poly_fc, params.ptype_area_agg,
                                    [params.col_empind, params.col_emptot], 0)
    emp_ind_pct = {'EMPIND_jobshare': emp_ind_wtot[params.col_empind] / emp_ind_wtot[params.col_emptot]
                   if emp_ind_wtot[params.col_emptot] > 0 else 0}

    # population split by the EJ-community indicator; the highest flag value
    # is treated as "in EJ area" (assumes the flag sorts that way — TODO
    # confirm against params.col_ej_ind values).
    pop_x_ej = lubuff.point_sum(pcl_pt_data, input_poly_fc, params.ptype_area_agg,
                                [params.col_pop_ilut], 0, params.col_ej_ind)
    pop_tot = sum(pop_x_ej.values())
    # was max(list(pop_x_ej.keys())): max() iterates the dict's keys directly,
    # and the empty-dict guard prevents a ValueError (pop_tot > 0 implies the
    # dict is non-empty, so key_yes_ej is only used when it is valid).
    key_yes_ej = max(pop_x_ej) if pop_x_ej else None
    pct_pop_ej = {'Pct_PopEJArea': pop_x_ej[key_yes_ej] / pop_tot if pop_tot > 0 else 0}

    # job + dwelling-unit density per net acre, reported per land-use column
    job_pop_dens = lubuff.point_sum_density(pcl_pt_data, input_poly_fc, params.ptype_area_agg,
                                            [params.col_du, params.col_emptot], 0)
    # total_dens = {"job_du_perNetAcre": sum(job_pop_dens.values())}

    out_dict = {}
    for d in [accdata, collision_data, mix_data, intsecn_dens, bikeway_covg,
              tran_stop_density, pct_pop_ej, emp_ind_pct, job_pop_dens]:
        out_dict.update(d)
    return out_dict
def poly_avg_futyears(input_poly_fc, data_year):
    """Return the land-use mix index dict for input_poly_fc for a future year.

    IDEALLY could make this part of get_poly_avg as a single function with a
    variable number of input args.
    """
    # resolve the year-specific parcel point layer once and reuse it
    # (previously params.parcel_pt_fc_yr(data_year) was called a second time
    # inside get_mix_idx, and this local was never used)
    pcl_pt_data = params.parcel_pt_fc_yr(data_year)
    mix_data = mixidx.get_mix_idx(pcl_pt_data, input_poly_fc, params.ptype_area_agg)
    return mix_data
def get_multiyear_data(project_fc, project_type, base_df, analysis_year):
    """Build a one-column dataframe of land-use metrics for project_fc in the
    given analysis year.

    Parameters:
        project_fc -- project feature class
        project_type -- project type used by the buffer/aggregation helpers
        base_df -- unused here; kept for interface compatibility with callers
        analysis_year -- model year whose parcel/model-link layers are read

    Returns a pandas DataFrame of metrics, indexed by metric name.
    """
    ilut_val_fields = [p.col_pop_ilut, p.col_du, p.col_emptot, p.col_k12_enr, p.col_empind,
                       p.col_persntrip_res] + p.ilut_ptrip_mode_fields

    # BUGFIX: these previously referenced an undefined name `year`; the
    # function's year parameter is analysis_year.
    fc_pcl_pt = p.parcel_pt_fc_yr(analysis_year)
    fc_pcl_poly = p.parcel_poly_fc_yr(analysis_year)
    fc_modelhwylinks = p.model_links_fc(analysis_year)

    year_dict = {}

    # get data on pop, job, k12 totals
    # point_sum(fc_pclpt, fc_project, project_type, val_fields, buffdist, case_field=None, case_excs_list=[])
    ilut_buff_vals = lu_pt_buff.point_sum(fc_pcl_pt, project_fc, project_type, ilut_val_fields,
                                          p.ilut_sum_buffdist, case_field=None, case_excs_list=[])

    # industrial-job share of all jobs; zero job total -> share of 0
    tot_jobs = ilut_buff_vals[p.col_emptot]
    ilut_indjob_share = {"{}_jobshare".format(p.col_empind):
                         ilut_buff_vals[p.col_empind] / tot_jobs if tot_jobs > 0 else 0}
    ilut_buff_vals.update(ilut_indjob_share)

    # mode shares of resident person trips; zero trip total -> shares of 0
    tot_ptrips = ilut_buff_vals[p.col_persntrip_res]
    ilut_mode_split = {"{}_share".format(modetrp):
                       ilut_buff_vals[modetrp] / tot_ptrips if tot_ptrips > 0 else 0
                       for modetrp in p.ilut_ptrip_mode_fields}
    ilut_buff_vals.update(ilut_mode_split)

    # cleanup to remove non-percentage mode split values, if we want to keep
    # output CSV from getting too long.
    # for trip_numcol in p.ilut_ptrip_mode_fields: del ilut_buff_vals[trip_numcol]

    # job + du total
    job_du_tot = {"SUM_JOB_DU": ilut_buff_vals[p.col_du] + ilut_buff_vals[p.col_emptot]}

    # model-based vehicle occupancy
    veh_occ_data = link_occ.get_linkoccup_data(project_fc, project_type, fc_modelhwylinks)

    # land use diversity index
    mix_index_data = mixidx.get_mix_idx(fc_pcl_pt, project_fc, project_type)

    # housing type mix
    housing_mix_data = lu_pt_buff.point_sum(fc_pcl_pt, project_fc, project_type, [p.col_du],
                                            p.du_mix_buffdist, p.col_housing_type,
                                            case_excs_list=['Other'])

    # acres of "natural resources" (land use type = forest or agriculture)
    nat_resources_data = urbn.nat_resources(project_fc, project_type, fc_pcl_poly, analysis_year)

    # combine into dict
    for d in [ilut_buff_vals, job_du_tot, veh_occ_data, mix_index_data,
              housing_mix_data, nat_resources_data]:
        year_dict.update(d)

    # make dict into dataframe
    df_year_out = pd.DataFrame.from_dict(year_dict, orient='index')
    return df_year_out
# NOTE(review): orphaned fragment — the enclosing function's `def` line is not
# present in this view and the trailing `for d in [...` list is cut off
# mid-statement, so this cannot run as-is. It looks like a partial copy of the
# get_multiyear_data variant defined above (same locals and comments, but this
# copy calls urbn.nat_resources without a project_type arg — confirm which
# signature is current before reusing). Left byte-identical pending cleanup.
# cleanup to remove non-percentage mode split values, if we want to keep output CSV from getting too long. # for trip_numcol in p.ilut_ptrip_mode_fields: del ilut_buff_vals[trip_numcol] # job + du total job_du_tot = { "SUM_JOB_DU": ilut_buff_vals[p.col_du] + ilut_buff_vals[p.col_emptot] } # model-based vehicle occupancy veh_occ_data = link_occ.get_linkoccup_data(project_fc, project_type, fc_modelhwylinks) # land use diversity index mix_index_data = mixidx.get_mix_idx(fc_pcl_pt, project_fc, project_type) # housing type mix housing_mix_data = lu_pt_buff.point_sum(fc_pcl_pt, project_fc, project_type, [p.col_du], p.du_mix_buffdist, p.col_housing_type, case_excs_list=['Other']) # acres of "natural resources" (land use type = forest or agriculture) nat_resources_data = urbn.nat_resources(project_fc, fc_pcl_poly, year) # combine into dict for d in [ ilut_buff_vals, job_du_tot, veh_occ_data, mix_index_data,
# NOTE(review): orphaned fragment — begins mid-function (no `def` in view) and
# ends mid-statement at `out_dict = {`, so this cannot run as-is. It resembles
# the get_multiyear_data variants above but hard-codes p.ptype_arterial,
# p.parcel_pt_fc / p.parcel_poly_fc, and the literal year 2016 — presumably an
# older single-year version; verify before reusing. Left byte-identical
# pending cleanup.
ilut_buff_vals.update(ilut_mode_split) # cleanup to remove non-percentage mode split values, if we want to keep output CSV from getting too long. # for trip_numcol in p.ilut_ptrip_mode_fields: del ilut_buff_vals[trip_numcol] # job + du total job_du_tot = { "SUM_JOB_DU": ilut_buff_vals[p.col_du] + ilut_buff_vals[p.col_emptot] } # model-based vehicle occupancy veh_occ_data = link_occ.get_linkoccup_data(project_fc, p.ptype_arterial, p.model_links_fc) # land use diversity index mix_index_data = mixidx.get_mix_idx(p.parcel_pt_fc, project_fc, p.ptype_arterial) # housing type mix housing_mix_data = lu_pt_buff.point_sum(p.parcel_pt_fc, project_fc, project_type, [p.col_du], p.du_mix_buffdist, p.col_housing_type, case_excs_list=['Other']) # acres of "natural resources" (land use type = forest or agriculture) nat_resources_data = urbn.nat_resources(project_fc, p.parcel_poly_fc, 2016) # combine all together----------------------------------------------------------- out_dict = {
def get_multiyear_data(fc_tripshedpoly, projtyp, base_df, analysis_year):
    """Build a one-column dataframe of land-use metrics for the trip-shed
    polygon in the given analysis year.

    Parameters:
        fc_tripshedpoly -- trip-shed polygon feature class
        projtyp -- project type used by the buffer/aggregation helpers
        base_df -- unused here; kept for interface compatibility with callers
        analysis_year -- model year whose parcel layers are read

    Returns a pandas DataFrame of metrics, indexed by metric name.
    """
    print("getting multi-year data for {}...".format(analysis_year))

    ilut_val_fields = [params.col_pop_ilut, params.col_du, params.col_emptot, params.col_k12_enr,
                       params.col_empind, params.col_persntrip_res] + params.ilut_ptrip_mode_fields

    fc_pcl_pt = params.parcel_pt_fc_yr(analysis_year)
    fc_pcl_poly = params.parcel_poly_fc_yr(analysis_year)

    year_dict = {}

    # get data on pop, job, k12 totals
    # point_sum(fc_pclpt, fc_tripshedpoly, projtyp, val_fields, buffdist, case_field=None, case_excs_list=[])
    ilut_buff_vals = LandUseBuffCalcs(fc_pcl_pt, fc_tripshedpoly, projtyp, ilut_val_fields,
                                      params.ilut_sum_buffdist, case_field=None,
                                      case_excs_list=[]).point_sum()

    # industrial-job share of all jobs; guard so an empty buffer (zero jobs)
    # yields 0 instead of raising ZeroDivisionError
    tot_jobs = ilut_buff_vals[params.col_emptot]
    ilut_indjob_share = {"{}_jobshare".format(params.col_empind):
                         ilut_buff_vals[params.col_empind] / tot_jobs if tot_jobs > 0 else 0}
    ilut_buff_vals.update(ilut_indjob_share)

    # mode shares of resident person trips; zero trip total -> shares of 0
    tot_ptrips = ilut_buff_vals[params.col_persntrip_res]
    ilut_mode_split = {"{}_share".format(modetrp):
                       ilut_buff_vals[modetrp] / tot_ptrips if tot_ptrips > 0 else 0
                       for modetrp in params.ilut_ptrip_mode_fields}
    ilut_buff_vals.update(ilut_mode_split)

    # cleanup to remove non-percentage mode split values, if we want to keep
    # output CSV from getting too long.
    # for trip_numcol in params.ilut_ptrip_mode_fields: del ilut_buff_vals[trip_numcol]

    # job + du total
    job_du_tot = {"SUM_JOB_DU": ilut_buff_vals[params.col_du] + ilut_buff_vals[params.col_emptot]}

    # land use diversity index
    mix_index_data = mixidx.get_mix_idx(fc_pcl_pt, fc_tripshedpoly, projtyp)

    # housing type mix
    housing_mix_data = LandUseBuffCalcs(fc_pcl_pt, fc_tripshedpoly, projtyp, [params.col_du],
                                        params.du_mix_buffdist, params.col_housing_type,
                                        case_excs_list=['Other']).point_sum()

    # acres of "natural resources" (land use type = forest or agriculture)
    nat_resources_data = urbn.nat_resources(fc_tripshedpoly, projtyp, fc_pcl_poly, analysis_year)

    # combine into dict
    for d in [ilut_buff_vals, job_du_tot, mix_index_data, housing_mix_data, nat_resources_data]:
        year_dict.update(d)

    # make dict into dataframe
    df_year_out = pd.DataFrame.from_dict(year_dict, orient='index')
    return df_year_out