def allocation_helper(df_w_sector, method, attr):
    """
    Used when two dataframes are required to create an allocation ratio
    :param df_w_sector: allocation dataframe with sector columns
    :param attr: attributes of the activity set from the method yaml
    :param method: FBS method yaml; currently written for 'multiplication'
    :return:
    """
    helper_allocation = flowsa.getFlowByActivity(
        flowclass=[attr['helper_source_class']],
        datasource=attr['helper_source'],
        years=[attr['helper_source_year']])
    # fill null values
    helper_allocation = helper_allocation.fillna(value=fba_fill_na_dict)
    # convert unit
    helper_allocation = convert_unit(helper_allocation)

    # assign naics to allocation dataset
    helper_allocation = add_sectors_to_flowbyactivity(
        helper_allocation,
        sectorsourcename=method['target_sector_source'],
        levelofSectoragg=attr['helper_sector_aggregation'])
    # generalize activity field names to enable link to water withdrawal table
    helper_allocation = generalize_activity_field_names(helper_allocation)
    # drop columns
    helper_allocation = helper_allocation.drop(
        columns=['Activity', 'Description', 'Min', 'Max'])
    # rename column
    helper_allocation = helper_allocation.rename(
        columns={"FlowAmount": 'HelperFlow'})

    # merge allocation df with helper df based on sectors, depending on geo scales of dfs
    if attr['helper_from_scale'] == 'national':
        modified_fba_allocation = df_w_sector.merge(
            helper_allocation[['Sector', 'HelperFlow']], how='left')
    if (attr['helper_from_scale'] == 'state') and \
            (attr['allocation_from_scale'] == 'county'):
        helper_allocation['Location_tmp'] = helper_allocation['Location'].apply(
            lambda x: str(x[0:2]))
        df_w_sector['Location_tmp'] = df_w_sector['Location'].apply(
            lambda x: str(x[0:2]))
        modified_fba_allocation = df_w_sector.merge(
            helper_allocation[['Sector', 'Location_tmp', 'HelperFlow']],
            how='left')
        modified_fba_allocation = modified_fba_allocation.drop(
            columns=['Location_tmp'])

    # modify flow amounts using helper data
    if attr['helper_method'] == 'multiplication':
        modified_fba_allocation['FlowAmount'] = \
            modified_fba_allocation['FlowAmount'] * \
            modified_fba_allocation['HelperFlow']
    # drop columns
    modified_fba_allocation = modified_fba_allocation.drop(columns="HelperFlow")

    return modified_fba_allocation
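# --- Illustrative sketch (not part of flowsa) ---
# The 'multiplication' branch above boils down to a left merge on 'Sector'
# followed by an element-wise product. The toy frames below only mirror the
# column names used in allocation_helper(); the values are made up.
def _demo_helper_multiplication():
    import pandas as pd

    df_w_sector = pd.DataFrame({'Sector': ['111', '112'],
                                'FlowAmount': [10.0, 20.0]})
    helper = pd.DataFrame({'Sector': ['111', '112'],
                           'HelperFlow': [2.0, 3.0]})
    # merge helper values onto the allocation df, multiply, then drop the helper column
    merged = df_w_sector.merge(helper[['Sector', 'HelperFlow']], how='left')
    merged['FlowAmount'] = merged['FlowAmount'] * merged['HelperFlow']
    return merged.drop(columns='HelperFlow')  # FlowAmount becomes 20.0 and 60.0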
def allocation_helper(df_w_sector, method, attr, v):
    """
    Used when two dataframes are required to create an allocation ratio
    :param df_w_sector: allocation dataframe with sector columns
    :param method: FBS method yaml; currently written for 'multiplication' and 'proportional'
    :param attr: attributes of the activity set from the method yaml
    :param v: the datasource parameters from the method yaml
    :return:
    """
    from flowsa.Blackhurst_IO import scale_blackhurst_results_to_usgs_values
    from flowsa.BLS_QCEW import clean_bls_qcew_fba, bls_clean_allocation_fba_w_sec
    from flowsa.mapping import add_sectors_to_flowbyactivity

    helper_allocation = flowsa.getFlowByActivity(flowclass=[attr['helper_source_class']],
                                                 datasource=attr['helper_source'],
                                                 years=[attr['helper_source_year']])
    if 'clean_helper_fba' in attr:
        log.info("Cleaning " + attr['helper_source'] + ' FBA')
        # tmp hard coded - need to generalize
        if attr['helper_source'] == 'BLS_QCEW':
            helper_allocation = clean_bls_qcew_fba(helper_allocation, attr)
        # helper_allocation = getattr(sys.modules[__name__], attr["clean_helper_fba"])(helper_allocation, attr)

    # clean df
    helper_allocation = clean_df(helper_allocation, flow_by_activity_fields, fba_fill_na_dict)
    # drop rows with flowamount = 0
    helper_allocation = helper_allocation[helper_allocation['FlowAmount'] != 0]

    # agg data if necessary or filter
    # determine the scale to aggregate to
    to_scale = min(fips_number_key[attr['allocation_from_scale']],
                   fips_number_key[v['geoscale_to_use']])
    if fips_number_key[attr['helper_from_scale']] > to_scale:
        helper_allocation = agg_by_geoscale(
            helper_allocation, attr['helper_from_scale'],
            list(fips_number_key.keys())[list(fips_number_key.values()).index(to_scale)],
            fba_default_grouping_fields)
    else:
        helper_allocation = filter_by_geoscale(helper_allocation, attr['helper_from_scale'])

    # assign naics to allocation dataset
    helper_allocation = add_sectors_to_flowbyactivity(
        helper_allocation, sectorsourcename=method['target_sector_source'])
    # generalize activity field names to enable link to water withdrawal table
    helper_allocation = generalize_activity_field_names(helper_allocation)

    # clean up helper fba with sectors
    if 'clean_helper_fba_wsec' in attr:
        log.info("Cleaning " + attr['helper_source'] + ' FBA with sectors')
        # tmp hard coded - need to generalize
        if attr['helper_source'] == 'BLS_QCEW':
            helper_allocation = bls_clean_allocation_fba_w_sec(helper_allocation, attr, method)
        # helper_allocation = getattr(sys.modules[__name__], attr["clean_helper_fba_wsec"])(helper_allocation, attr, method)

    # drop columns
    helper_allocation = helper_allocation.drop(columns=['Activity', 'Min', 'Max'])

    if attr['helper_method'] == 'proportional':
        # if calculating proportion, first subset the helper allocation df to only contain relevant sectors
        # create list of sectors in the flow allocation df, drop any rows of data in the flow df
        # that aren't in list
        sector_list = df_w_sector['Sector'].unique().tolist()
        # subset fba allocation table to the values in the activity list, based on overlapping sectors
        helper_allocation = helper_allocation.loc[helper_allocation['Sector'].isin(sector_list)]
        # calculate proportional ratios
        helper_allocation = proportional_allocation_by_location_and_sector(helper_allocation, 'Sector')

    # rename column
    helper_allocation = helper_allocation.rename(columns={"FlowAmount": 'HelperFlow'})
    merge_columns = [e for e in ['Location', 'Sector', 'HelperFlow', 'FlowAmountRatio']
                     if e in helper_allocation.columns.values.tolist()]

    # merge allocation df with helper df based on sectors, depending on geo scales of dfs
    if (attr['helper_from_scale'] == 'state') and (attr['allocation_from_scale'] == 'county'):
        helper_allocation.loc[:, 'Location_tmp'] = helper_allocation['Location'].apply(lambda x: x[0:2])
        df_w_sector.loc[:, 'Location_tmp'] = df_w_sector['Location'].apply(lambda x: x[0:2])
        merge_columns.append('Location_tmp')
        modified_fba_allocation = df_w_sector.merge(helper_allocation[merge_columns], how='left')
        modified_fba_allocation = modified_fba_allocation.drop(columns=['Location_tmp'])
    else:
        modified_fba_allocation = df_w_sector.merge(helper_allocation[merge_columns], how='left')

    # modify flow amounts using helper data
    if 'multiplication' in attr['helper_method']:
        # todo: modify so if missing data, replaced with value from one geoscale up instead of national
        # todo: modify year after merge if necessary
        # if missing values (na or 0), replace with national level values
        replacement_values = helper_allocation[helper_allocation['Location'] == US_FIPS].reset_index(drop=True)
        replacement_values = replacement_values.rename(columns={"HelperFlow": 'ReplacementValue'})
        modified_fba_allocation = modified_fba_allocation.merge(
            replacement_values[['Sector', 'ReplacementValue']], how='left')
        modified_fba_allocation.loc[:, 'HelperFlow'] = modified_fba_allocation['HelperFlow'].fillna(
            modified_fba_allocation['ReplacementValue'])
        modified_fba_allocation.loc[:, 'HelperFlow'] = np.where(modified_fba_allocation['HelperFlow'] == 0,
                                                                modified_fba_allocation['ReplacementValue'],
                                                                modified_fba_allocation['HelperFlow'])
        # replace non-existent helper flow values with a 0, so after multiplying,
        # don't have an incorrect value associated with the new unit
        modified_fba_allocation['HelperFlow'] = modified_fba_allocation['HelperFlow'].fillna(value=0)
        modified_fba_allocation.loc[:, 'FlowAmount'] = modified_fba_allocation['FlowAmount'] * \
                                                       modified_fba_allocation['HelperFlow']
        # drop columns
        modified_fba_allocation = modified_fba_allocation.drop(columns=["HelperFlow", 'ReplacementValue'])
    elif attr['helper_method'] == 'proportional':
        modified_fba_allocation['FlowAmountRatio'] = modified_fba_allocation['FlowAmountRatio'].fillna(0)
        modified_fba_allocation.loc[:, 'FlowAmount'] = modified_fba_allocation['FlowAmount'] * \
                                                       modified_fba_allocation['FlowAmountRatio']
        modified_fba_allocation = modified_fba_allocation.drop(columns=["HelperFlow", 'FlowAmountRatio'])

    # drop rows of 0
    modified_fba_allocation = modified_fba_allocation[
        modified_fba_allocation['FlowAmount'] != 0].reset_index(drop=True)

    # todo: change units
    modified_fba_allocation.loc[modified_fba_allocation['Unit'] == 'gal/employee', 'Unit'] = 'gal'

    # option to scale up fba values
    if 'scaled' in attr['helper_method']:
        log.info("Scaling " + attr['helper_source'] + ' to FBA values')
        # tmp hard coded - need to generalize
        if attr['helper_source'] == 'BLS_QCEW':
            modified_fba_allocation = scale_blackhurst_results_to_usgs_values(modified_fba_allocation, attr)
        # modified_fba_allocation = getattr(sys.modules[__name__], attr["scale_helper_results"])(modified_fba_allocation, attr)

    return modified_fba_allocation
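# --- Illustrative sketch (not part of flowsa) ---
# The 'proportional' branch relies on proportional_allocation_by_location_and_sector()
# to turn helper flows into FlowAmountRatio values that sum to 1 within a Location.
# A minimal pandas version of that idea, with made-up data (the real flowsa function
# also accounts for sector lengths and grouping fields):
def _demo_proportional_ratios():
    import pandas as pd

    helper = pd.DataFrame({'Location': ['06000', '06000', '36000'],
                           'Sector': ['111', '112', '111'],
                           'FlowAmount': [30.0, 70.0, 50.0]})
    # each sector's share of its location total
    helper['FlowAmountRatio'] = (helper['FlowAmount'] /
                                 helper.groupby('Location')['FlowAmount'].transform('sum'))
    return helper  # ratios: 0.3 and 0.7 for '06000', 1.0 for '36000'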
def main(method_name):
    """
    Creates a flowbysector dataset
    :param method_name: Name of method corresponding to flowbysector method yaml name
    :return: flowbysector
    """
    log.info("Initiating flowbysector creation for " + method_name)
    # call on method
    method = load_method(method_name)
    # create dictionary of data and allocation datasets
    fb = method['source_names']
    # Create empty list for storing fbs files
    fbs_list = []
    for k, v in fb.items():
        # pull fba data for allocation
        flows = load_source_dataframe(k, v)

        if v['data_format'] == 'FBA':
            # clean up fba, if specified in yaml
            if v["clean_fba_df_fxn"] != 'None':
                log.info("Cleaning up " + k + " FlowByActivity")
                flows = getattr(sys.modules[__name__], v["clean_fba_df_fxn"])(flows)

            flows = clean_df(flows, flow_by_activity_fields, fba_fill_na_dict)

            # if activity_sets are specified in a file, call them here
            if 'activity_set_file' in v:
                aset_names = pd.read_csv(flowbysectoractivitysetspath + v['activity_set_file'], dtype=str)

            # create dictionary of allocation datasets for different activities
            activities = v['activity_sets']
            # subset activity data and allocate to sector
            for aset, attr in activities.items():
                # subset by named activities
                if 'activity_set_file' in v:
                    names = aset_names[aset_names['activity_set'] == aset]['name']
                else:
                    names = attr['names']
                log.info("Preparing to handle subset of flownames " + ', '.join(map(str, names)) + " in " + k)

                # subset fba data by activity
                flows_subset = flows[
                    (flows[fba_activity_fields[0]].isin(names)) |
                    (flows[fba_activity_fields[1]].isin(names))].reset_index(drop=True)

                # extract relevant geoscale data or aggregate existing data
                log.info("Subsetting/aggregating dataframe to " + attr['allocation_from_scale'] + " geoscale")
                flows_subset_geo = subset_df_by_geoscale(
                    flows_subset, v['geoscale_to_use'], attr['allocation_from_scale'])

                # Add sectors to df activity, depending on level of specified sector aggregation
                log.info("Adding sectors to " + k)
                flow_subset_wsec = add_sectors_to_flowbyactivity(
                    flows_subset_geo, sectorsourcename=method['target_sector_source'])
                # clean up fba with sectors, if specified in yaml
                if v["clean_fba_w_sec_df_fxn"] != 'None':
                    log.info("Cleaning up " + k + " FlowByActivity with sectors")
                    flow_subset_wsec = getattr(sys.modules[__name__],
                                               v["clean_fba_w_sec_df_fxn"])(flow_subset_wsec, attr)

                # map df to elementary flows
                log.info("Mapping flows in " + k + ' to federal elementary flow list')
                if 'fedefl_mapping' in v:
                    mapping_files = v['fedefl_mapping']
                else:
                    mapping_files = k
                flow_subset_mapped = map_elementary_flows(flow_subset_wsec, mapping_files)

                # if allocation method is "direct", then no need to create alloc ratios,
                # else need to use allocation dataframe to create sector allocation ratios
                if attr['allocation_method'] == 'direct':
                    log.info('Directly assigning ' + ', '.join(map(str, names)) + ' to sectors')
                    fbs = flow_subset_mapped.copy()
                else:
                    # determine appropriate allocation dataset
                    log.info("Loading allocation flowbyactivity " + attr['allocation_source'] +
                             " for year " + str(attr['allocation_source_year']))
                    fba_allocation = flowsa.getFlowByActivity(
                        flowclass=[attr['allocation_source_class']],
                        datasource=attr['allocation_source'],
                        years=[attr['allocation_source_year']]).reset_index(drop=True)

                    fba_allocation = clean_df(fba_allocation, flow_by_activity_fields, fba_fill_na_dict)

                    # subset based on yaml settings
                    if attr['allocation_flow'] != 'None':
                        fba_allocation = fba_allocation.loc[
                            fba_allocation['FlowName'].isin(attr['allocation_flow'])]
                    if attr['allocation_compartment'] != 'None':
                        fba_allocation = fba_allocation.loc[
                            fba_allocation['Compartment'].isin(attr['allocation_compartment'])]

                    # cleanup the fba allocation df, if necessary
                    if 'clean_allocation_fba' in attr:
                        log.info("Cleaning " + attr['allocation_source'])
                        fba_allocation = getattr(sys.modules[__name__],
                                                 attr["clean_allocation_fba"])(fba_allocation, attr)
                    # reset index
                    fba_allocation = fba_allocation.reset_index(drop=True)

                    # check if allocation data exists at specified geoscale to use
                    log.info("Checking if allocation data exists at the " +
                             attr['allocation_from_scale'] + " level")
                    check_if_data_exists_at_geoscale(fba_allocation, attr['allocation_from_scale'])

                    # aggregate geographically to the scale of the flowbyactivity source, if necessary
                    from_scale = attr['allocation_from_scale']
                    to_scale = v['geoscale_to_use']
                    # if allocation df is less aggregated than FBA df, aggregate allocation df to target scale
                    if fips_number_key[from_scale] > fips_number_key[to_scale]:
                        fba_allocation = agg_by_geoscale(fba_allocation, from_scale, to_scale,
                                                         fba_default_grouping_fields)
                    # else, if fba is more aggregated than allocation table, use fba as both to and from scale
                    else:
                        fba_allocation = filter_by_geoscale(fba_allocation, from_scale)

                    # assign sector to allocation dataset
                    # todo: add sectorsourcename col value
                    log.info("Adding sectors to " + attr['allocation_source'])
                    fba_allocation_wsec = add_sectors_to_flowbyactivity(
                        fba_allocation, sectorsourcename=method['target_sector_source'])

                    # generalize activity field names to enable link to main fba source
                    log.info("Generalizing activity columns in subset of " + attr['allocation_source'])
                    fba_allocation_wsec = generalize_activity_field_names(fba_allocation_wsec)

                    # call on fxn to further clean up/disaggregate the fba allocation data, if exists
                    if 'clean_allocation_fba_w_sec' in attr:
                        log.info("Further disaggregating sectors in " + attr['allocation_source'])
                        fba_allocation_wsec = getattr(
                            sys.modules[__name__],
                            attr["clean_allocation_fba_w_sec"])(fba_allocation_wsec, attr, method)

                    # subset fba datasets to only keep the sectors associated with activity subset
                    log.info("Subsetting " + attr['allocation_source'] + " for sectors in " + k)
                    fba_allocation_subset = get_fba_allocation_subset(fba_allocation_wsec, k, names)

                    # drop columns
                    fba_allocation_subset = fba_allocation_subset.drop(columns=['Activity'])

                    # if there is an allocation helper dataset, modify allocation df
                    if attr['allocation_helper'] == 'yes':
                        log.info("Using the specified allocation helper for subset of " +
                                 attr['allocation_source'])
                        fba_allocation_subset = allocation_helper(fba_allocation_subset, method, attr, v)

                    # create flow allocation ratios for each activity
                    flow_alloc_list = []
                    for n in names:
                        log.info("Creating allocation ratios for " + n)
                        fba_allocation_subset_2 = get_fba_allocation_subset(fba_allocation_subset, k, [n])
                        if len(fba_allocation_subset_2) == 0:
                            log.info("No data found to allocate " + n)
                        else:
                            flow_alloc = allocate_by_sector(fba_allocation_subset_2, attr['allocation_method'])
                            flow_alloc = flow_alloc.assign(FBA_Activity=n)
                            flow_alloc_list.append(flow_alloc)
                    flow_allocation = pd.concat(flow_alloc_list)

                    # check for issues with allocation ratios
                    check_allocation_ratios(flow_allocation, aset, k)

                    # create list of sectors in the flow allocation df,
                    # drop any rows of data in the flow df that aren't in list
                    sector_list = flow_allocation['Sector'].unique().tolist()

                    # subset fba allocation table to the values in the activity list, based on overlapping sectors
                    flow_subset_mapped = flow_subset_mapped.loc[
                        (flow_subset_mapped[fbs_activity_fields[0]].isin(sector_list)) |
                        (flow_subset_mapped[fbs_activity_fields[1]].isin(sector_list))]

                    # check if fba and allocation dfs have the same LocationSystem
                    log.info("Checking if flowbyactivity and allocation dataframes use the same location systems")
                    check_if_location_systems_match(flow_subset_mapped, flow_allocation)

                    # merge fba df w/flow allocation dataset
                    log.info("Merge " + k + " and subset of " + attr['allocation_source'])
                    fbs = flow_subset_mapped.merge(
                        flow_allocation[['Location', 'Sector', 'FlowAmountRatio', 'FBA_Activity']],
                        left_on=['Location', 'SectorProducedBy', 'ActivityProducedBy'],
                        right_on=['Location', 'Sector', 'FBA_Activity'],
                        how='left')
                    fbs = fbs.merge(
                        flow_allocation[['Location', 'Sector', 'FlowAmountRatio', 'FBA_Activity']],
                        left_on=['Location', 'SectorConsumedBy', 'ActivityConsumedBy'],
                        right_on=['Location', 'Sector', 'FBA_Activity'],
                        how='left')

                    # merge the flowamount columns
                    fbs.loc[:, 'FlowAmountRatio'] = fbs['FlowAmountRatio_x'].fillna(fbs['FlowAmountRatio_y'])
                    # fill null rows with 0 because no allocation info
                    fbs['FlowAmountRatio'] = fbs['FlowAmountRatio'].fillna(0)

                    # check if fba and alloc dfs have data for same geoscales -
                    # comment back in after addressing the 'todo'
                    # log.info("Checking if flowbyactivity and allocation dataframes have data at the same locations")
                    # check_if_data_exists_for_same_geoscales(fbs, k, attr['names'])

                    # drop rows where there is no allocation data
                    fbs = fbs.dropna(subset=['Sector_x', 'Sector_y'], how='all').reset_index()

                    # calculate flow amounts for each sector
                    log.info("Calculating new flow amounts using flow ratios")
                    fbs.loc[:, 'FlowAmount'] = fbs['FlowAmount'] * fbs['FlowAmountRatio']

                    # drop columns
                    log.info("Cleaning up new flow by sector")
                    fbs = fbs.drop(columns=['Sector_x', 'FlowAmountRatio_x', 'Sector_y',
                                            'FlowAmountRatio_y', 'FlowAmountRatio',
                                            'FBA_Activity_x', 'FBA_Activity_y'])

                    # drop rows where flowamount = 0 (although this includes dropping suppressed data)
                    fbs = fbs[fbs['FlowAmount'] != 0].reset_index(drop=True)

                # clean df
                fbs = clean_df(fbs, flow_by_sector_fields_w_activity, fbs_fill_na_dict)

                # aggregate df geographically, if necessary
                log.info("Aggregating flowbysector to " + method['target_geoscale'] + " level")
                if fips_number_key[v['geoscale_to_use']] < fips_number_key[attr['allocation_from_scale']]:
                    from_scale = v['geoscale_to_use']
                else:
                    from_scale = attr['allocation_from_scale']
                to_scale = method['target_geoscale']
                fbs_geo_agg = agg_by_geoscale(fbs, from_scale, to_scale, fbs_grouping_fields_w_activities)

                # aggregate data to every sector level
                log.info("Aggregating flowbysector to all sector levels")
                fbs_sec_agg = sector_aggregation(fbs_geo_agg, fbs_grouping_fields_w_activities)
                # add missing naics5/6 when only one naics5/6 associated with a naics4
                fbs_agg = sector_disaggregation(fbs_sec_agg, flow_by_sector_fields_w_activity)

                # compare flowbysector with flowbyactivity
                check_for_differences_between_fba_load_and_fbs_output(flow_subset_mapped, fbs_agg, aset, k)

                # return sector level specified in method yaml
                # load the crosswalk linking sector lengths
                sector_list = get_sector_list(method['target_sector_level'])

                # subset df, necessary because not all of the sectors are NAICS and can get duplicate rows
                fbs_1 = fbs_agg.loc[
                    (fbs_agg[fbs_activity_fields[0]].isin(sector_list)) &
                    (fbs_agg[fbs_activity_fields[1]].isin(sector_list))].reset_index(drop=True)
                fbs_2 = fbs_agg.loc[
                    (fbs_agg[fbs_activity_fields[0]].isin(sector_list)) &
                    (fbs_agg[fbs_activity_fields[1]].isnull())].reset_index(drop=True)
                fbs_3 = fbs_agg.loc[
                    (fbs_agg[fbs_activity_fields[0]].isnull()) &
                    (fbs_agg[fbs_activity_fields[1]].isin(sector_list))].reset_index(drop=True)
                fbs_sector_subset = pd.concat([fbs_1, fbs_2, fbs_3])

                # check if losing data by subsetting at specified sector length
                log.info('Checking if losing data by subsetting dataframe')
                fbs_sector_subset_2 = check_if_losing_sector_data(
                    fbs_agg, fbs_sector_subset, method['target_sector_level'])

                # set source name
                fbs_sector_subset_2.loc[:, 'SectorSourceName'] = method['target_sector_source']

                # drop activity columns
                del fbs_sector_subset_2['ActivityProducedBy'], fbs_sector_subset_2['ActivityConsumedBy']

                log.info("Completed flowbysector for activity subset with flows " +
                         ', '.join(map(str, names)))
                fbs_list.append(fbs_sector_subset_2)
        else:
            # if the loaded flow df is already in FBS format, append directly to list of FBS
            log.info("Append " + k + " to FBS list")
            fbs_list.append(flows)

    # create single df of all activities
    log.info("Concat data for all activities")
    fbss = pd.concat(fbs_list, ignore_index=True, sort=False)

    log.info("Clean final dataframe")
    # aggregate df as activities might have data for the same specified sector length
    fbss = clean_df(fbss, flow_by_sector_fields, fbs_fill_na_dict)
    fbss = aggregator(fbss, fbs_default_grouping_fields)

    # sort df
    log.info("Sort and store dataframe")
    # add missing fields, ensure correct data type, reorder columns
    fbss = fbss.sort_values(
        ['SectorProducedBy', 'SectorConsumedBy', 'Flowable', 'Context']).reset_index(drop=True)

    # save parquet file
    store_flowbysector(fbss, method_name)
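# --- Illustrative sketch (not part of flowsa) ---
# When the allocation method is not 'direct', main() merges FlowAmountRatio values onto
# the mapped FBA and scales FlowAmount by the ratio. A stripped-down version of that
# merge-and-scale step with made-up data (only the producing-sector merge is shown):
def _demo_allocation_ratio_merge():
    import pandas as pd

    fba = pd.DataFrame({'Location': ['06000', '06000'],
                        'SectorProducedBy': ['111', '112'],
                        'FlowAmount': [100.0, 100.0]})
    ratios = pd.DataFrame({'Location': ['06000', '06000'],
                           'Sector': ['111', '112'],
                           'FlowAmountRatio': [0.25, 0.75]})
    # attach the ratio for each location/sector pair, then scale the flow
    fbs = fba.merge(ratios, left_on=['Location', 'SectorProducedBy'],
                    right_on=['Location', 'Sector'], how='left')
    fbs['FlowAmountRatio'] = fbs['FlowAmountRatio'].fillna(0)
    fbs['FlowAmount'] = fbs['FlowAmount'] * fbs['FlowAmountRatio']
    return fbs.drop(columns=['Sector', 'FlowAmountRatio'])  # FlowAmount becomes 25.0 and 75.0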
def allocation_helper(df_w_sector, method, attr):
    """
    Used when two dataframes are required to create an allocation ratio
    :param df_w_sector: allocation dataframe with sector columns
    :param method: FBS method yaml; currently written for 'multiplication'
    :param attr: attributes of the activity set from the method yaml
    :return:
    """
    from flowsa.mapping import add_sectors_to_flowbyactivity

    helper_allocation = flowsa.getFlowByActivity(flowclass=[attr['helper_source_class']],
                                                 datasource=attr['helper_source'],
                                                 years=[attr['helper_source_year']])
    # clean df
    helper_allocation = clean_df(helper_allocation, flow_by_activity_fields, fba_fill_na_dict)
    # drop rows with flowamount = 0
    helper_allocation = helper_allocation[helper_allocation['FlowAmount'] != 0]

    # assign naics to allocation dataset
    helper_allocation = add_sectors_to_flowbyactivity(
        helper_allocation,
        sectorsourcename=method['target_sector_source'],
        levelofSectoragg=attr['helper_sector_aggregation'])
    # generalize activity field names to enable link to water withdrawal table
    helper_allocation = generalize_activity_field_names(helper_allocation)
    # drop columns
    helper_allocation = helper_allocation.drop(columns=['Activity', 'Min', 'Max'])
    # rename column
    helper_allocation = helper_allocation.rename(columns={"FlowAmount": 'HelperFlow'})

    # merge allocation df with helper df based on sectors, depending on geo scales of dfs
    if attr['helper_from_scale'] == 'national':
        modified_fba_allocation = df_w_sector.merge(
            helper_allocation[['Sector', 'HelperFlow']], how='left')
    if (attr['helper_from_scale'] == 'state') and (attr['allocation_from_scale'] == 'state'):
        modified_fba_allocation = df_w_sector.merge(
            helper_allocation[['Sector', 'Location', 'HelperFlow']], how='left')
    if (attr['helper_from_scale'] == 'state') and (attr['allocation_from_scale'] == 'county'):
        helper_allocation.loc[:, 'Location_tmp'] = helper_allocation['Location'].apply(
            lambda x: str(x[0:2]))
        df_w_sector.loc[:, 'Location_tmp'] = df_w_sector['Location'].apply(lambda x: str(x[0:2]))
        modified_fba_allocation = df_w_sector.merge(
            helper_allocation[['Sector', 'Location_tmp', 'HelperFlow']], how='left')
        modified_fba_allocation = modified_fba_allocation.drop(columns=['Location_tmp'])

    # todo: modify so if missing data, replaced with value from one geoscale up instead of national
    # if missing values (na or 0), replace with national level values
    replacement_values = helper_allocation[helper_allocation['Location'] == US_FIPS].reset_index(drop=True)
    replacement_values = replacement_values.rename(columns={"HelperFlow": 'ReplacementValue'})
    modified_fba_allocation = modified_fba_allocation.merge(
        replacement_values[['Sector', 'ReplacementValue']], how='left')
    modified_fba_allocation.loc[:, 'HelperFlow'] = modified_fba_allocation['HelperFlow'].fillna(
        modified_fba_allocation['ReplacementValue'])
    modified_fba_allocation.loc[:, 'HelperFlow'] = np.where(modified_fba_allocation['HelperFlow'] == 0,
                                                            modified_fba_allocation['ReplacementValue'],
                                                            modified_fba_allocation['HelperFlow'])

    # modify flow amounts using helper data
    if attr['helper_method'] == 'multiplication':
        # replace non-existent helper flow values with a 0, so after multiplying,
        # don't have an incorrect value associated with the new unit
        modified_fba_allocation['HelperFlow'] = modified_fba_allocation['HelperFlow'].fillna(value=0)
        modified_fba_allocation.loc[:, 'FlowAmount'] = modified_fba_allocation['FlowAmount'] * \
                                                       modified_fba_allocation['HelperFlow']
        # drop columns
        modified_fba_allocation = modified_fba_allocation.drop(columns=["HelperFlow", 'ReplacementValue'])

    # drop rows of 0 to speed up allocation
    modified_fba_allocation = modified_fba_allocation[
        modified_fba_allocation['FlowAmount'] != 0].reset_index(drop=True)

    # todo: modify the unit

    return modified_fba_allocation
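# --- Illustrative sketch (not part of flowsa) ---
# The replacement-value logic above backfills missing or zero HelperFlow values with the
# national (US_FIPS) record. The same fillna/np.where pattern on toy data; '00000' is a
# stand-in for the US_FIPS constant used by the module:
def _demo_national_backfill():
    import numpy as np
    import pandas as pd

    us_fips = '00000'  # stand-in for flowsa's US_FIPS constant
    helper = pd.DataFrame({'Location': ['06000', us_fips],
                           'Sector': ['111', '111'],
                           'HelperFlow': [0.0, 5.0]})
    df = pd.DataFrame({'Location': ['06000', '36000'],
                       'Sector': ['111', '111'],
                       'HelperFlow': [0.0, np.nan]})
    # national-level values act as the fallback for NaN or zero helper flows
    replacement = (helper[helper['Location'] == us_fips]
                   .rename(columns={'HelperFlow': 'ReplacementValue'}))
    df = df.merge(replacement[['Sector', 'ReplacementValue']], how='left')
    df['HelperFlow'] = df['HelperFlow'].fillna(df['ReplacementValue'])
    df['HelperFlow'] = np.where(df['HelperFlow'] == 0, df['ReplacementValue'], df['HelperFlow'])
    return df.drop(columns='ReplacementValue')  # both rows now carry HelperFlow = 5.0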
def main(method_name):
    """
    Creates a flowbysector dataset
    :param method_name: Name of method corresponding to flowbysector method yaml name
    :return: flowbysector
    """
    log.info("Initiating flowbysector creation for " + method_name)
    # call on method
    method = load_method(method_name)
    # create dictionary of water data and allocation datasets
    fbas = method['flowbyactivity_sources']
    # Create empty list for storing fbs files
    fbss = []
    for k, v in fbas.items():
        # pull water data for allocation
        log.info("Retrieving flowbyactivity for datasource " + k + " in year " + str(v['year']))
        flows = flowsa.getFlowByActivity(flowclass=[v['class']],
                                         years=[v['year']],
                                         datasource=k)

        # if necessary, standardize names in data set
        if v['activity_name_standardization_fxn'] != 'None':
            log.info("Standardizing activity names in " + k)
            flows = getattr(sys.modules[__name__], v['activity_name_standardization_fxn'])(flows)

        # drop description field
        flows = flows.drop(columns='Description')
        # fill null values
        flows = flows.fillna(value=fba_fill_na_dict)
        # map df to elementary flows - commented out until mapping complete
        # log.info("Mapping flows in " + k + ' to federal elementary flow list')
        # flows_mapped = map_elementary_flows(flows, k)
        # convert unit todo: think about unit conversion here
        log.info("Converting units in " + k)
        flows = convert_unit(flows)

        # create dictionary of allocation datasets for different activities
        activities = v['activity_sets']
        for aset, attr in activities.items():
            # subset by named activities
            names = [attr['names']]
            log.info("Preparing to handle subset of flownames " + ', '.join(map(str, names)) + " in " + k)

            # subset usgs data by activity
            flow_subset = flows[(flows[fba_activity_fields[0]].isin(names)) |
                                (flows[fba_activity_fields[1]].isin(names))]
            # Reset index values after subset
            flow_subset = flow_subset.reset_index(drop=True)

            # check if flowbyactivity data exists at specified geoscale to use
            log.info("Checking if flowbyactivity data exists for " + ', '.join(map(str, names)) +
                     " at the " + v['geoscale_to_use'] + ' level')
            geocheck = check_if_data_exists_at_geoscale(flow_subset, names, v['geoscale_to_use'])
            # aggregate geographically to the scale of the allocation dataset
            if geocheck == "Yes":
                activity_from_scale = v['geoscale_to_use']
            else:
                # if activity does not exist at specified geoscale, issue warning and use data
                # at less aggregated geoscale, and sum to specified geoscale
                log.info("Checking if flowbyactivity data exists for " + ', '.join(map(str, names)) +
                         " at a less aggregated level")
                new_geoscale_to_use = check_if_data_exists_at_less_aggregated_geoscale(
                    flow_subset, names, v['geoscale_to_use'])
                activity_from_scale = new_geoscale_to_use

            activity_to_scale = attr['allocation_from_scale']
            # if usgs is less aggregated than allocation df, aggregate usgs activity to target scale
            if fips_number_key[activity_from_scale] > fips_number_key[activity_to_scale]:
                log.info("Aggregating subset from " + activity_from_scale + " to " + activity_to_scale)
                flow_subset = agg_by_geoscale(flow_subset, activity_from_scale, activity_to_scale,
                                              fba_default_grouping_fields, names)
            # else, aggregate to the geoscale we want to use
            elif fips_number_key[activity_from_scale] > fips_number_key[v['geoscale_to_use']]:
                log.info("Aggregating subset from " + activity_from_scale + " to " + v['geoscale_to_use'])
                flow_subset = agg_by_geoscale(flow_subset, activity_from_scale, v['geoscale_to_use'],
                                              fba_default_grouping_fields, names)
            # else, if usgs is more aggregated than allocation table, filter relevant rows
            else:
                log.info("Filtering out " + activity_from_scale + " data")
                flow_subset = filter_by_geoscale(flow_subset, activity_from_scale, names)

            # location column pad zeros if necessary
            flow_subset['Location'] = flow_subset['Location'].apply(
                lambda x: x.ljust(3 + len(x), '0') if len(x) < 5 else x)

            # Add sectors to usgs activity, creating two versions of the flow subset
            # the first version "flow_subset" is the most disaggregated version of the Sectors (NAICS)
            # the second version, "flow_subset_agg" includes only the most aggregated level of sectors
            log.info("Adding sectors to " + k + " for " + ', '.join(map(str, names)))
            flow_subset_wsec = add_sectors_to_flowbyactivity(
                flow_subset, sectorsourcename=method['target_sector_source'])
            flow_subset_wsec_agg = add_sectors_to_flowbyactivity(
                flow_subset, sectorsourcename=method['target_sector_source'], levelofSectoragg='agg')

            # if allocation method is "direct", then no need to create alloc ratios,
            # else need to use allocation dataframe to create sector allocation ratios
            if attr['allocation_method'] == 'direct':
                fbs = flow_subset_wsec_agg.copy()
            else:
                # determine appropriate allocation dataset
                log.info("Loading allocation flowbyactivity " + attr['allocation_source'] +
                         " for year " + str(attr['allocation_source_year']))
                fba_allocation = flowsa.getFlowByActivity(
                    flowclass=[attr['allocation_source_class']],
                    datasource=attr['allocation_source'],
                    years=[attr['allocation_source_year']]).reset_index(drop=True)

                # fill null values
                fba_allocation = fba_allocation.fillna(value=fba_fill_na_dict)
                # convert unit
                fba_allocation = convert_unit(fba_allocation)

                # subset based on yaml settings
                if attr['allocation_flow'] != 'None':
                    fba_allocation = fba_allocation.loc[
                        fba_allocation['FlowName'].isin(attr['allocation_flow'])]
                if attr['allocation_compartment'] != 'None':
                    fba_allocation = fba_allocation.loc[
                        fba_allocation['Compartment'].isin(attr['allocation_compartment'])]
                # reset index
                fba_allocation = fba_allocation.reset_index(drop=True)

                # check if allocation data exists at specified geoscale to use
                log.info("Checking if allocation data exists for " + ', '.join(map(str, names)) +
                         " at the " + attr['allocation_from_scale'] + " level")
                check_if_data_exists_at_geoscale(fba_allocation, names, attr['allocation_from_scale'])

                # aggregate geographically to the scale of the flowbyactivity source, if necessary
                from_scale = attr['allocation_from_scale']
                to_scale = v['geoscale_to_use']
                # if allocation df is less aggregated than FBA df, aggregate allocation df to target scale
                if fips_number_key[from_scale] > fips_number_key[to_scale]:
                    fba_allocation = agg_by_geoscale(fba_allocation, from_scale, to_scale,
                                                     fba_default_grouping_fields, names)
                # else, if usgs is more aggregated than allocation table, use usgs as both to and from scale
                else:
                    fba_allocation = filter_by_geoscale(fba_allocation, from_scale, names)

                # assign sector to allocation dataset
                log.info("Adding sectors to " + attr['allocation_source'])
                fba_allocation = add_sectors_to_flowbyactivity(
                    fba_allocation,
                    sectorsourcename=method['target_sector_source'],
                    levelofSectoragg=attr['allocation_sector_aggregation'])

                # subset fba datasets to only keep the naics associated with usgs activity subset
                log.info("Subsetting " + attr['allocation_source'] + " for sectors in " + k)
                fba_allocation_subset = get_fba_allocation_subset(fba_allocation, k, names)
                # Reset index values after subset
                fba_allocation_subset = fba_allocation_subset.reset_index(drop=True)

                # generalize activity field names to enable link to water withdrawal table
                log.info("Generalizing activity names in subset of " + attr['allocation_source'])
                fba_allocation_subset = generalize_activity_field_names(fba_allocation_subset)
                # drop columns
                fba_allocation_subset = fba_allocation_subset.drop(columns=['Activity'])

                # if there is an allocation helper dataset, modify allocation df
                if attr['allocation_helper'] == 'yes':
                    log.info("Using the specified allocation helper for subset of " + attr['allocation_source'])
                    fba_allocation_subset = allocation_helper(fba_allocation_subset, method, attr)

                # create flow allocation ratios
                log.info("Creating allocation ratios for " + attr['allocation_source'])
                flow_allocation = allocate_by_sector(fba_allocation_subset, attr['allocation_method'])

                # create list of sectors in the flow allocation df,
                # drop any rows of data in the flow df that aren't in list
                sector_list = flow_allocation['Sector'].unique().tolist()

                # subset fba allocation table to the values in the activity list, based on overlapping sectors
                flow_subset_wsec = flow_subset_wsec.loc[
                    (flow_subset_wsec[fbs_activity_fields[0]].isin(sector_list)) |
                    (flow_subset_wsec[fbs_activity_fields[1]].isin(sector_list))]

                # check if fba and allocation dfs have the same LocationSystem
                log.info("Checking if flowbyactivity and allocation dataframes use the same location systems")
                check_if_location_systems_match(flow_subset_wsec, flow_allocation)

                # merge water withdrawal df w/flow allocation dataset
                log.info("Merge " + k + " and subset of " + attr['allocation_source'])
                fbs = flow_subset_wsec.merge(
                    flow_allocation[['Location', 'LocationSystem', 'Sector', 'FlowAmountRatio']],
                    left_on=['Location', 'LocationSystem', 'SectorProducedBy'],
                    right_on=['Location', 'LocationSystem', 'Sector'],
                    how='left')
                fbs = fbs.merge(
                    flow_allocation[['Location', 'LocationSystem', 'Sector', 'FlowAmountRatio']],
                    left_on=['Location', 'LocationSystem', 'SectorConsumedBy'],
                    right_on=['Location', 'LocationSystem', 'Sector'],
                    how='left')

                # drop rows where both sector produced/consumed by in flow allocation df are null
                fbs = fbs.dropna(subset=['Sector_x', 'Sector_y'], how='all').reset_index()

                # merge the flowamount columns
                fbs['FlowAmountRatio'] = fbs['FlowAmountRatio_x'].fillna(fbs['FlowAmountRatio_y'])
                fbs['FlowAmountRatio'] = fbs['FlowAmountRatio'].fillna(0)

                # calculate flow amounts for each sector
                log.info("Calculating new flow amounts using flow ratios")
                fbs['FlowAmount'] = fbs['FlowAmount'] * fbs['FlowAmountRatio']

                # drop columns
                log.info("Cleaning up new flow by sector")
                fbs = fbs.drop(columns=['Sector_x', 'FlowAmountRatio_x', 'Sector_y', 'FlowAmountRatio_y',
                                        'FlowAmountRatio', 'ActivityProducedBy', 'ActivityConsumedBy'])

                # rename flow name to flowable
                fbs = fbs.rename(columns={"FlowName": 'Flowable',
                                          "Compartment": "Context"})

                # drop rows where flowamount = 0 (although this includes dropping suppressed data)
                fbs = fbs[fbs['FlowAmount'] != 0].reset_index(drop=True)
                # add missing data columns
                fbs = add_missing_flow_by_fields(fbs, flow_by_sector_fields)
                # fill null values
                fbs = fbs.fillna(value=fbs_fill_na_dict)

            # aggregate df geographically, if necessary
            log.info("Aggregating flowbysector to " + method['target_geoscale'] + " level")
            if fips_number_key[v['geoscale_to_use']] < fips_number_key[attr['allocation_from_scale']]:
                from_scale = v['geoscale_to_use']
            else:
                from_scale = attr['allocation_from_scale']
            to_scale = method['target_geoscale']
            fbs = agg_by_geoscale(fbs, from_scale, to_scale, fbs_default_grouping_fields, names)

            # aggregate data to every sector level
            log.info("Aggregating flowbysector to " + method['target_sector_level'])
            fbs = sector_aggregation(fbs, fbs_default_grouping_fields)

            # test agg by sector
            sector_agg_comparison = sector_flow_comparision(fbs)

            # return sector level specified in method yaml
            # load the crosswalk linking sector lengths
            cw = load_sector_length_crosswalk()
            sector_list = cw[method['target_sector_level']].unique().tolist()

            # add any non-NAICS sectors used with NAICS
            household = load_household_sector_codes()
            household = household.loc[household['NAICS_Level_to_Use_For'] == method['target_sector_level']]
            # add household sector to sector list
            sector_list.extend(household['Code'].tolist())

            # subset df
            fbs = fbs.loc[(fbs[fbs_activity_fields[0]].isin(sector_list)) |
                          (fbs[fbs_activity_fields[1]].isin(sector_list))].reset_index(drop=True)

            # add any missing columns of data and cast to appropriate data type
            fbs = add_missing_flow_by_fields(fbs, flow_by_sector_fields)

            log.info("Completed flowbysector for activity subset with flows " + ', '.join(map(str, names)))
            fbss.append(fbs)

    # create single df of all activities
    fbss = pd.concat(fbss, ignore_index=True, sort=False)
    # aggregate df as activities might have data for the same specified sector length
    fbss = aggregator(fbss, fbs_default_grouping_fields)
    # sort df
    fbss = fbss.sort_values(
        ['SectorProducedBy', 'SectorConsumedBy', 'Flowable', 'Context']).reset_index(drop=True)
    # save parquet file
    store_flowbysector(fbss, method_name)
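# --- Illustrative usage sketch (not part of flowsa) ---
# main() is driven entirely by a flowbysector method yaml; the name below is
# hypothetical and must match a method yaml available to the installed flowsa.
if __name__ == '__main__':
    main(method_name='Water_national_2015_m1')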