# Read the subset parcel file, aggregate land-use sqft by PSRC_ID, and
# substitute those rows into the master parcel table.
subset_parcels = pd.read_csv(Subset_Parcels_File_Name, sep=',')
subset_parcels = subset_parcels.loc[~subset_parcels['PSRC_ID'].isnull()]
# Upper-case the column names so they line up with the master parcel file.
subset_parcels.columns = [i.upper() for i in subset_parcels.columns]
subset_parcels = subset_parcels.groupby('PSRC_ID')[lu_type_list].sum()

# Keep master rows not covered by the subset, then add the aggregated
# subset rows. pd.concat replaces DataFrame.append (removed in pandas 2.0).
updated_parcels = parcels.loc[~parcels.index.isin(subset_parcels.index)]
updated_parcels = pd.concat([updated_parcels, subset_parcels])
updated_parcels = updated_parcels.fillna(0)
# Attach jurisdiction; both sides share the PSRC_ID column name.
final_updated_parcels = updated_parcels.reset_index().merge(
    parcel_lookup_df[['PSRC_ID', 'JURISDICTION']],
    how='left',
    on='PSRC_ID')

# Sanity-check totals: new vs old sqft over the subset, plus the grand total.
subset_new_tot_sqft = subset_parcels[lu_type_list].sum().sum()
print('Subset new sqft {0:.0f}'.format(subset_new_tot_sqft))
subset_old_tot_sqft = parcels.loc[parcels.index.isin(subset_parcels.index),
                                  lu_type_list].sum().sum()
print('Subset old sqft {0:.0f}'.format(subset_old_tot_sqft))

updated_tot_sqft = final_updated_parcels[lu_type_list].sum().sum()
print('Updated total sqft {0:.0f}'.format(updated_tot_sqft))

print("Exporting updated urbansim parcel file ...")
final_updated_parcels.to_csv(os.path.join(Output_Parcel_Folder,
                                          Output_Parcel_File_Name),
                             sep=',')

utility.backupScripts(
    __file__, os.path.join(Output_Parcel_Folder, os.path.basename(__file__)))

print("Finished.")
# Report the job totals affected by the parcel replacement.
print('parcels to be replaced have ' + str(oldjobs) + ' jobs')
print('jobs gained ' + str(newjobs - oldjobs))
# Overwrite the selected columns with new parcel data; pandas aligns the
# right-hand side on the shared index.
updated_parcel_df.loc[updated_parcel_df.index.isin(new_parcel_data_df.index),
                      Columns_List] = new_parcel_data_df[Columns_List]

if Set_Jobs_to_Zeros_All_Bel_Parcels_Not_in_New_Parcel_Data_File:
    # Compute the mask once; it is used for both the report and the zeroing.
    missing_mask = updated_parcel_df.index.isin(
        missing_bellevue_parcels_df['PARCELID'])
    jobs_to_be_zeroed_out = updated_parcel_df.loc[missing_mask,
                                                  'EMPTOT_P'].sum()
    updated_parcel_df.loc[missing_mask, Columns_List] = 0
    print('-----------------------------------------')
    print('Some COB parcels are not provided in the ' +
          New_parcel_data_file_name + '.')
    print('But they exist in ' + Original_parcel_file_name + '.')
    print('Number of jobs in these parcels are now zeroed out: ' +
          str(jobs_to_be_zeroed_out))

print('total jobs before change: ' +
      str(original_parcel_data_df['EMPTOT_P'].sum()))
print('total jobs after change: ' + str(updated_parcel_df['EMPTOT_P'].sum()))
print('Exporting parcel files...')
updated_parcel_df.to_csv(os.path.join(working_folder,
                                      Updated_parcel_file_name),
                         sep=' ')

utility.backupScripts(__file__,
                      os.path.join(working_folder, os.path.basename(__file__)))

print('Done')
Esempio n. 3
0
print "Exporting updated urbansim parcel file ..."
parcels.to_csv(os.path.join(Output_Parcel_Folder, Output_Parcel_File_Name),
               index=False,
               sep=' ')
print 'Total jobs in updated parcel file are {0:.0f}'.format(
    parcels['EMPTOT_P'].sum())

# backup input files inside input folder
print "Backup input files ..."
input_backup_folder = os.path.join(Output_Parcel_Folder, 'inputs')
if not os.path.exists(input_backup_folder):
    os.makedirs(input_backup_folder)
copyfile(os.path.join(Common_Data_Folder, Original_ESD_Parcel_File_Name),
         os.path.join(input_backup_folder, Original_ESD_Parcel_File_Name))
copyfile(os.path.join(Common_Data_Folder, Conversion_Factors_File_Name),
         os.path.join(input_backup_folder, Conversion_Factors_File_Name))
copyfile(
    os.path.join(Common_Data_Folder, Subarea_Adjustment_Factor_File_Name),
    os.path.join(input_backup_folder, Subarea_Adjustment_Factor_File_Name))
copyfile(
    Parcels_Sqft_File_Name,
    os.path.join(input_backup_folder,
                 os.path.basename(Parcels_Sqft_File_Name)))
copyfile(
    Hh_and_person_file,
    os.path.join(input_backup_folder, os.path.basename(Hh_and_person_file)))
utility.backupScripts(
    __file__, os.path.join(input_backup_folder, os.path.basename(__file__)))

print "Finished"
                             index_col="BKRCastTAZ")

# Drop the four parking attributes from the original parcel table; they are
# re-joined below from the parking cost source file.
update_parcel_df = original_parcel_df.drop(Parking_Cost_Attributes, axis=1)

parking_cost_df = pd.read_csv(Parking_Cost_Source,
                              sep=" ",
                              index_col="PARCELID")
parking_cost_df = parking_cost_df[Parking_Cost_Attributes]

# Join by parcel id; parcels missing from the cost file get zero costs.
update_parcel_df = update_parcel_df.join(parking_cost_df)
update_parcel_df.fillna(0, inplace=True)

if SET_BELLEVUE_PARKING_COST_HALF:
    # Tag each parcel with its jurisdiction (looked up through TAZ), halve
    # the daily/hourly parking prices for Bellevue, then drop the helper.
    update_parcel_df = update_parcel_df.join(taz_subarea_df['Jurisdiction'],
                                             on='TAZ_P')
    bellevue_mask = update_parcel_df['Jurisdiction'] == 'BELLEVUE'
    update_parcel_df.loc[bellevue_mask, 'PPRICDYP'] *= 0.5
    update_parcel_df.loc[bellevue_mask, 'PPRICHRP'] *= 0.5
    update_parcel_df = update_parcel_df.drop(['Jurisdiction'], axis=1)

print('Exporting updated parcel file...')
update_parcel_df.to_csv(os.path.join(Project_Folder, Updated_Parcel_File_Name),
                        sep=" ")

utility.backupScripts(__file__,
                      os.path.join(Project_Folder, os.path.basename(__file__)))

print('Done')
the households in the parcel file are consistent with the synthetic population file.
'''

Hh_and_person_file = r"I:\Modeling and Analysis Group\01_BKRCast\BKRPopSim\PopulationSim_BaseData\2018TFPSensitivity-reducedMF\2018tFPSensitivity_reducedMF_hh_and_persons.h5"
parcel_folder = r'Z:\Modeling Group\BKRCast\LandUse\TFP\2018TFP sensitivity-reducedSQFT&MF'
input_parcel_file = 'parcels_urbansim.txt'
output_parcel_file = 'parcels_urbansim.txt'

print('Loading hh_and_persons.h5...')
hdf_file = h5py.File(Hh_and_person_file, "r")
hh_df = utility.h5_to_df(hdf_file, 'Household')
# Release the HDF5 handle once the DataFrame is built (assumes h5_to_df
# materializes the data -- TODO confirm against utility.h5_to_df).
hdf_file.close()
hh_df.set_index('hhparcel', inplace=True)

print('Updating number of households...')
# Use a list selector: tuple-based column selection after groupby was
# deprecated and later removed in pandas.
hhs = hh_df.groupby('hhparcel')[['hhexpfac', 'hhsize']].sum()
parcel_df = pd.read_csv(os.path.join(parcel_folder, input_parcel_file), sep=' ')
parcel_df.set_index('PARCELID', inplace=True)
parcel_df = parcel_df.join(hhs, how='left')

# HH_P becomes the household count per parcel (sum of expansion factors).
parcel_df['HH_P'] = parcel_df['hhexpfac']
parcel_df.fillna(0, inplace=True)
parcel_df.drop(['hhexpfac', 'hhsize'], axis=1, inplace=True)

print('Exporting future parcel file...')
parcel_df.to_csv(os.path.join(parcel_folder, output_parcel_file), sep=' ')

utility.backupScripts(__file__, os.path.join(parcel_folder, os.path.basename(__file__)))

print('Done.')
Esempio n. 6
0
# Load the parcel file and drop leftover index columns from earlier exports.
parcel_df = pd.read_csv(original_parcel_file_name, sep=' ')
parcel_df.drop(['Unnamed: 0', 'index'], axis=1, inplace=True)
ratio_df = pd.read_csv(ratio_file_name)
# Attach the per-TAZ adjustment ratio to every parcel in that TAZ.
updated_parcel_df = parcel_df.merge(
    ratio_df[['BKRCastTAZ', ratio_attribute_name]],
    left_on='TAZ_P',
    right_on='BKRCastTAZ',
    how='inner')

total_jobs_before = updated_parcel_df['EMPTOT_P'].sum()
print('Total jobs in the original file: ' + str(total_jobs_before))
# Scale every job column by the ratio in one vectorized, row-aligned step
# instead of a Python loop over columns.
updated_parcel_df[Job_Field] = updated_parcel_df[Job_Field].mul(
    updated_parcel_df[ratio_attribute_name], axis=0)
updated_parcel_df[Job_Field] = updated_parcel_df[Job_Field].round(0).astype(int)
total_jobs_after = updated_parcel_df['EMPTOT_P'].sum()
print('Total jobs after the adjustment: ' + str(total_jobs_after))
print('Job increase: ' + str(total_jobs_after - total_jobs_before))
print('Exporting ...')
# Drop the helper columns brought in by the merge before export.
updated_parcel_df.drop(['BKRCastTAZ', ratio_attribute_name],
                       axis=1,
                       inplace=True)
updated_parcel_df.to_csv(updated_parcel_file_name, index=False, sep=' ')

utility.backupScripts(
    __file__,
    os.path.join(os.path.dirname(updated_parcel_file_name),
                 os.path.basename(__file__)))
print('Done.')
Esempio n. 7
0
# Attach base-year household counts (indexed by GEOID10) to the OFM table.
ofm_df = ofm_df.merge(base_hhs_by_geoid10,
                      how='left',
                      left_on='GEOID10',
                      right_index=True)

if base_year <= target_year <= future_year:
    # Target year is right between the bookends.
    print('interpolating...')
else:
    print('extrapolating...')

ofm_df.fillna(0, inplace=True)
# Linear interpolation weight; also valid for extrapolation (ratio falls
# outside [0, 1]). The * 1.0 forces float division under Python 2.
ratio = (target_year - base_year) * 1.0 / (future_year - base_year)
ofm_df['OFM_groupquarters'] = 0
ofm_df['OFM_hhs'] = (
    (ofm_df['future_total_hhs'] - ofm_df['base_total_hhs']) * ratio +
    ofm_df['base_total_hhs']).round(0)
ofm_df['OFM_persons'] = (
    (ofm_df['future_total_persons'] - ofm_df['base_total_persons']) * ratio +
    ofm_df['base_total_persons']).round(0)

print('Interpolated total hhs: ' + str(ofm_df['OFM_hhs'].sum()))
print('Interpolated total persons: ' + str(ofm_df['OFM_persons'].sum()))
ofm_df[['GEOID10', 'OFM_groupquarters', 'OFM_hhs',
        'OFM_persons']].to_csv(interploated_ofm_estimate_by_GEOID, index=False)
utility.backupScripts(
    __file__,
    os.path.join(os.path.dirname(interploated_ofm_estimate_by_GEOID),
                 os.path.basename(__file__)))

print('Done')