def run_future_calculations(self, options, **kwargs):
    """
    Compute future-year energy use for every end-state feature with dwelling
    units or employment, returning (energy_output_list, options) where each
    row holds the values of self.output_fields for one feature.
    """
    self.features = self.end_state_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
    # TODO Use the first PolicySet--this needs to be done better
    self.policy_set = self.config_entity.computed_policy_sets()[0].policy_by_key('energy')
    self.policy_assumptions = self.policy_set.get_building_efficiency_assumptions(self.METRICS)
    # Horizon over which efficiencies are annualized.
    self.base_year = self.config_entity.scenario.project.base_year
    self.future_year = self.config_entity.scenario.year
    self.increment = self.future_year - self.base_year
    self.annualize_efficiencies()
    # Attach related BaseCanvas and ClimateZones pks to each feature via the
    # shared geography tables.
    annotated_features = annotated_related_feature_class_pk_via_geographies(self.features, self.config_entity, [
        DbEntityKey.BASE_CANVAS, DbEntityKey.CLIMATE_ZONES])
    energy_output_list = []
    # Progress is reported roughly every 1/14th of the rows (only for
    # querysets larger than 30 rows).
    approx_fifth = int(annotated_features.count() / 14 - 1) if annotated_features.count() > 30 else 1
    i = 1
    for feature in annotated_features.iterator():
        self.feature = feature
        # Missing keys default to 0.0 when output rows are assembled below.
        self.result_dict = defaultdict(lambda: float(0))
        if i % approx_fifth == 0:
            self.report_progress(0.05, **kwargs)
        self.base_canvas = self.base_class.objects.get(id=feature.base_canvas)
        try:
            climate_rates = self.collect_climate_rates(feature)
        except ObjectDoesNotExist:
            # No climate-zone relation for this feature -- skip it.
            continue
        # Household-per-dwelling-unit rates; 0 when there are no units.
        # NOTE(review): under Python 2 the inner division truncates if hh/du
        # are ints -- confirm the field types are decimal/float.
        self.future_occupancy_rate = float(self.feature.hh / self.feature.du if self.feature.du else 0)
        self.base_occupancy_rate = float(self.base_canvas.hh / self.base_canvas.du if self.base_canvas.du else 0)
        # self.energy_input = self.get_energy_input(feature)
        self.feature_dict = self.get_feature_input(feature)
        # Commercial inputs are building square feet split into redeveloped,
        # new, and base-year components.
        for category in self.COMMERCIAL_TYPES:
            self.feature_dict.update({
                category + "_redev": self.redev_units('bldg_sqft_' + category, self.feature, self.base_canvas),
                category + "_new": self.new_units('bldg_sqft_' + category, self.feature, self.base_canvas),
                category + "_base": float(getattr(self.base_canvas, 'bldg_sqft_' + category))
            })
        # Residential inputs are dwelling units scaled to occupied households.
        # NOTE(review): "_redev" is scaled by the *future* occupancy rate
        # while "_base" uses the base rate -- confirm that is intended.
        for category in self.RESIDENTIAL_TYPES:
            self.feature_dict.update({
                category + "_redev": self.redev_units(category, self.feature, self.base_canvas) *
                self.future_occupancy_rate,
                category + "_new": self.new_units(category, self.feature, self.base_canvas) * self.future_occupancy_rate,
                category + "_base": float(getattr(self.base_canvas, category)) * self.base_occupancy_rate
            })
        self.policy_assumptions.update(climate_rates)
        self.calculate_future_energy()
        self.calculate_visualized_field()
        # Merge inputs and assumptions into the result row so output_fields
        # can pull from any of them.
        self.result_dict.update(self.feature_dict)
        self.result_dict.update(self.policy_assumptions)
        output_row = map(lambda key: self.result_dict[key], self.output_fields)
        energy_output_list.append(output_row)
        i += 1
    return energy_output_list, options
def update(self, **kwargs):
    """
    Recompute future-scenario and increment features for a set of edited
    EndState features.

    :param: kwargs 'ids' is required. They contain the EndStateFeature ids that were updated
    """
    logger.info("Executing Scenario Updater (aka Core) using {0}".format(
        self.config_entity))

    # Resolve the EndState feature class for the config_entity owning the
    # analysis module, then narrow to just the rows that were updated.
    updated_ids = kwargs['ids']
    module_config_entity = kwargs['analysis_module'].config_entity
    end_state_class = module_config_entity.db_entity_feature_class(
        DbEntityKey.END_STATE)
    updated_features = end_state_class.objects.filter(id__in=updated_ids)

    # Annotate each feature with the pks of its related Increment and
    # BaseCanvas features, resolved via the shared geography tables.
    annotated_features = annotated_related_feature_class_pk_via_geographies(
        updated_features, module_config_entity,
        [DbEntityKey.INCREMENT, DbEntityKey.BASE_CANVAS])

    # Two update passes, reporting progress in roughly equal thirds.
    self.progress(0.33, **kwargs)
    update_future_scenario(self.config_entity, annotated_features)
    self.progress(0.33, **kwargs)
    update_increment_feature(self.config_entity, annotated_features)
    self.progress(0.34, **kwargs)

    logger.info("Executed Scenario Updater using {0}".format(
        self.config_entity))
def run_base_calculations(self, options, **kwargs):
    """
    Compute base-year energy use for every base-canvas feature with dwelling
    units or employment.

    Returns (energy_output_list, options) where each row of
    energy_output_list holds the values of self.output_fields for one
    feature. Fix: a stray duplicate calculate_visualized_field() call after
    the loop (operating on stale last-feature state, result discarded) has
    been removed.
    """
    features = self.base_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
    annotated_features = annotated_related_feature_class_pk_via_geographies(
        features, self.config_entity, [DbEntityKey.CLIMATE_ZONES])
    energy_output_list = []

    # Report progress roughly every 1/14th of the rows; hoist count() so the
    # COUNT query is only issued once.
    feature_count = annotated_features.count()
    approx_fifth = int(feature_count / 14 - 1) if feature_count > 30 else 1

    i = 1
    for feature in annotated_features.iterator():
        # Missing keys default to 0.0 when output rows are assembled below.
        self.result_dict = defaultdict(lambda: float(0))
        self.feature = feature
        if i % approx_fifth == 0:
            self.report_progress(0.05, **kwargs)
        try:
            climate_rates = self.collect_climate_rates(feature)
        except ObjectDoesNotExist:
            # No climate-zone relation for this feature -- skip it.
            continue

        # Households per dwelling unit; 0 when there are no dwelling units.
        # NOTE(review): truncates under Python 2 if hh/du are ints -- confirm
        # the field types are decimal/float.
        occupancy_rate = self.feature.hh / self.feature.du if self.feature.du else 0

        # Residential inputs: dwelling units scaled to occupied households.
        residential_units = {
            field: float(getattr(feature, field) * occupancy_rate)
            for field in self.RESIDENTIAL_TYPES
        }
        # Commercial inputs: building square feet by category.
        commercial_bldg_sqft = {
            field: float(getattr(feature, 'bldg_sqft_' + field))
            for field in self.COMMERCIAL_TYPES
        }

        self.feature_dict = self.get_feature_input(feature)
        self.feature_dict.update(climate_rates)
        self.feature_dict.update(residential_units)
        self.feature_dict.update(commercial_bldg_sqft)

        self.calculate_base_use()
        self.calculate_visualized_field()

        output_row = map(lambda key: self.result_dict[key], self.output_fields)
        energy_output_list.append(output_row)
        i += 1
    return energy_output_list, options
def run_base_calculations(self, options, **kwargs):
    """
    Compute base-year energy use for every base-canvas feature with dwelling
    units or employment, returning (energy_output_list, options) where each
    row holds the values of self.output_fields for one feature.
    """
    features = self.base_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
    annotated_features = annotated_related_feature_class_pk_via_geographies(features, self.config_entity, [
        DbEntityKey.CLIMATE_ZONES])
    energy_output_list = []
    # Progress is reported roughly every 1/14th of the rows (only for
    # querysets larger than 30 rows).
    approx_fifth = int(annotated_features.count() / 14 - 1) if annotated_features.count() > 30 else 1
    i = 1
    for feature in annotated_features.iterator():
        # Missing keys default to 0.0 when output rows are assembled below.
        self.result_dict = defaultdict(lambda: float(0))
        self.feature = feature
        if i % approx_fifth == 0:
            self.report_progress(0.05, **kwargs)
        try:
            climate_rates = self.collect_climate_rates(feature)
        except ObjectDoesNotExist:
            # No climate-zone relation for this feature -- skip it.
            continue
        # Households per dwelling unit; 0 when there are no dwelling units.
        # NOTE(review): truncates under Python 2 if hh/du are ints -- confirm
        # the field types are decimal/float.
        occupancy_rate = self.feature.hh / self.feature.du if self.feature.du else 0
        # Residential inputs: dwelling units scaled to occupied households.
        residential_units = {
            field: float(getattr(feature, field) * occupancy_rate)
            for field in self.RESIDENTIAL_TYPES
        }
        # Commercial inputs: building square feet by category.
        commercial_bldg_sqft = {
            field: float(getattr(feature, 'bldg_sqft_'+field))
            for field in self.COMMERCIAL_TYPES
        }
        self.feature_dict = self.get_feature_input(feature)
        self.feature_dict.update(climate_rates)
        self.feature_dict.update(residential_units)
        self.feature_dict.update(commercial_bldg_sqft)
        self.calculate_base_use()
        self.calculate_visualized_field()
        output_row = map(lambda key: self.result_dict[key], self.output_fields)
        energy_output_list.append(output_row)
        i += 1
    # NOTE(review): this trailing call looks like a stray duplicate -- it
    # reruns the visualized-field calculation on the *last* feature's state
    # and the result is discarded. Confirm it can be removed.
    self.calculate_visualized_field()
    return energy_output_list, options
def update(self, **kwargs):
    """Run the SED update across every Tier 2 TAZ feature of the owning config_entity."""
    logger.info("Executing SED Updater using {0}".format(
        self.config_entity))

    owning_config_entity = kwargs['analysis_module'].config_entity
    taz_class = owning_config_entity.db_entity_feature_class(
        DbEntityKey.TIER2_TAZ)
    # Empty filter() -- every TAZ row participates in the update.
    taz_features = taz_class.objects.filter()

    # Attach the related CityBoundary pk to each TAZ via the geography tables.
    annotated_features = annotated_related_feature_class_pk_via_geographies(
        taz_features, owning_config_entity, [DbEntityKey.CITY_BOUNDARY])

    self.progress(0.66, **kwargs)
    update_sed_feature(self.config_entity, annotated_features)
    self.progress(0.34, **kwargs)

    logger.info("Executed SED Updater using {0}".format(
        self.config_entity))
def update(self, **kwargs):
    """
    Recompute future-scenario and increment features for a set of edited
    EndState features.

    :param: kwargs 'ids' is required. They contain the EndStateFeature ids that were updated
    """
    logger.info("Executing Scenario Updater (aka Core) using {0}".format(self.config_entity))
    # Get the EndState Feature ids
    ids = kwargs['ids']
    config_entity = kwargs['analysis_module'].config_entity
    feature_class = config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
    features = feature_class.objects.filter(id__in=ids)
    # Annotate each feature with the pks of its related Increment and
    # BaseCanvas features, resolved via the shared geography tables.
    annotated_features = annotated_related_feature_class_pk_via_geographies(features, config_entity, [
        DbEntityKey.INCREMENT, DbEntityKey.BASE_CANVAS])
    # Two update passes, reporting progress in roughly equal thirds.
    self.progress(0.33, **kwargs)
    update_future_scenario(self.config_entity, annotated_features)
    self.progress(0.33, **kwargs)
    update_increment_feature(self.config_entity, annotated_features)
    self.progress(0.34, **kwargs)
    logger.info("Executed Scenario Updater using {0}".format(self.config_entity))
def update(self, **kwargs):
    """
    Run the VMT model: (re)create the VMT result table, then compute and
    collect VMT outputs for every canvas feature with dwelling units or
    employment, batching by id range to bound memory use.
    """
    # Make sure all related models have been created before querying
    logger.info("Executing Vmt using {0}".format(self.config_entity))
    self.vmt_progress(0.1, **kwargs)
    vmt_result_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT)
    vmt_variables_feature_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_VARIABLES)
    census_rates_feature_class = self.config_entity.db_entity_feature_class(DbEntityKey.CENSUS_RATES)
    # Future scenarios read from the end-state canvas and future trip
    # lengths/transit stops; otherwise use the base-year equivalents.
    if isinstance(self.config_entity.subclassed, FutureScenario):
        scenario_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
        trip_lengths_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_FUTURE_TRIP_LENGTHS)
        transit_stop_class = self.config_entity.db_entity_feature_class(DbEntityKey.FUTURE_TRANSIT_STOPS)
        is_future = True
    else:
        scenario_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
        trip_lengths_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_BASE_TRIP_LENGTHS)
        transit_stop_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_TRANSIT_STOPS)
        is_future = False
    # Schema/table names interpolated into the raw SQL below.
    sql_config_dict = dict(
        vmt_result_table=vmt_result_class.db_entity_key,
        vmt_schema=parse_schema_and_table(vmt_result_class._meta.db_table)[0],
        uf_canvas_table=scenario_class.db_entity_key,
        uf_canvas_schema=parse_schema_and_table(scenario_class._meta.db_table)[0],
        census_rates_table=census_rates_feature_class.db_entity_key,
        census_rates_schema=parse_schema_and_table(census_rates_feature_class._meta.db_table)[0],
        trip_lengths_table=trip_lengths_class.db_entity_key,
        trip_lengths_schema=parse_schema_and_table(trip_lengths_class._meta.db_table)[0],
        vmt_variables_table=vmt_variables_feature_class.db_entity_key,
        vmt_variables_schema=parse_schema_and_table(vmt_variables_feature_class._meta.db_table)[0],
        vmt_rel_table=parse_schema_and_table(vmt_result_class._meta.db_table)[1],
        vmt_rel_column=vmt_result_class._meta.parents.values()[0].column,
        transit_stop_schema=parse_schema_and_table(transit_stop_class._meta.db_table)[0],
        transit_stop_table=transit_stop_class.db_entity_key,
        config_entity=self.config_entity
    )
    # if not kwargs.get('postprocess_only'):
    self.run_vmt_preprocesses(sql_config_dict, **kwargs)
    # Rebuild the result table from scratch and clear the rel table.
    drop_table('{vmt_schema}.{vmt_result_table}'.format(**sql_config_dict))
    truncate_table('{vmt_schema}.{vmt_rel_table}'.format(**sql_config_dict))
    # All output columns except id become numeric(14, 4).
    attribute_list = filter(lambda x: x != 'id', vmt_output_field_list)
    output_field_syntax = 'id int, ' + create_sql_calculations(attribute_list, '{0} numeric(14, 4)')
    pSql = '''
    create table {vmt_schema}.{vmt_result_table} ({output_field_syntax});'''.format(
        output_field_syntax=output_field_syntax, **sql_config_dict)
    execute_sql(pSql)
    trip_lengths = DbEntityKey.VMT_FUTURE_TRIP_LENGTHS if is_future else DbEntityKey.VMT_BASE_TRIP_LENGTHS
    total_employment = scenario_class.objects.aggregate(Sum('emp'))
    all_features = scenario_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
    all_features_length = len(all_features)
    max_id = scenario_class.objects.all().order_by("-id")[0].id
    min_id = scenario_class.objects.all().order_by("id")[0].id
    # This section of the model passes data from POSTGRES into Python and is saved in memory before being committed
    # back to the database. In order to not use all memory with large datasets, jobs are broken up with a maximum
    # job size of JOB_SIZE rows before being committed to the database. It will iterate through until all rows are
    # calculated and committed.
    # NOTE(review): Python 2 integer division is relied on here for
    # job_count/rows_per_range -- confirm before any py3 migration.
    if all_features_length > self.JOB_SIZE:
        job_count = all_features_length / self.JOB_SIZE
        rows_per_range = (max_id - min_id) / job_count
    else:
        rows_per_range = max_id - min_id
        job_count = 1
    print 'Job Count: {0}'.format(job_count)
    start_id = min_id
    for i in range(job_count):
        # Last job absorbs any remainder of the id range.
        if i == job_count - 1:
            end_id = max_id
        else:
            end_id = start_id + rows_per_range - 1
        logger.info('Job: {0}'.format(i))
        logger.info('Start Id: {0}'.format(start_id))
        logger.info('End Id: {0}'.format(end_id))
        vmt_output_list = []
        features = all_features.filter(id__range=(start_id, end_id))
        annotated_features = annotated_related_feature_class_pk_via_geographies(features, self.config_entity, [
            DbEntityKey.VMT_VARIABLES, DbEntityKey.CENSUS_RATES, DbEntityKey.VMT_FUTURE_TRIP_LENGTHS,
            DbEntityKey.VMT_BASE_TRIP_LENGTHS, trip_lengths])
        assert annotated_features.exists(), "VMT is about to process 0 results"
        # NOTE(review): failed_features is collected but never consumed in
        # the code visible here.
        failed_features = []
        for feature in annotated_features:
            trip_length_id = feature.vmt_future_trip_lengths if is_future else feature.vmt_base_trip_lengths
            try:
                trip_lengths_feature = trip_lengths_class.objects.get(id=trip_length_id)
            except trip_lengths_class.DoesNotExist, e:
                failed_features.append(feature)
                logger.error('Cannot find trip lengths for geography with id = {0}'.format(feature.id))
                continue
            vmt_variables_feature = vmt_variables_feature_class.objects.get(id=feature.vmt_variables)
            try:
                census_rates_feature = census_rates_feature_class.objects.get(id=feature.census_rates)
            except census_rates_feature_class.DoesNotExist, e:
                logger.error('Cannot find census rate with id = {0}'.format(feature.census_rates))
                continue
            # Flatten all model inputs into one dict keyed the way the VMT
            # equations expect. The `float(x) or 0` idiom turns 0.0 into int
            # 0; NOTE(review): it raises TypeError if a field is None --
            # confirm fields are non-nullable.
            vmt_feature = dict(
                id=int(feature.id),
                acres_gross=float(feature.acres_gross) or 0,
                acres_parcel=float(feature.acres_parcel) or 0,
                acres_parcel_res=float(feature.acres_parcel_res) or 0,
                acres_parcel_emp=float(feature.acres_parcel_emp) or 0,
                acres_parcel_mixed=float(feature.acres_parcel_mixed_use) or 0,
                intersections_qtrmi=float(feature.intersection_density_sqmi) or 0,
                du=float(feature.du) or 0,
                du_occupancy_rate=float(feature.hh / feature.du if feature.du else 0),
                du_detsf=float(feature.du_detsf) or 0,
                du_attsf=float(feature.du_attsf) or 0,
                du_mf=float(feature.du_mf) or 0,
                du_mf2to4=float(feature.du_mf2to4) or 0,
                du_mf5p=float(feature.du_mf5p) or 0,
                hh=float(feature.hh) or 0,
                hh_avg_size=float(feature.pop / feature.hh if feature.hh > 0 else 0),
                hh_avg_inc=float(census_rates_feature.hh_agg_inc_rate) or 0,
                hh_inc_00_10=float(feature.hh * census_rates_feature.hh_inc_00_10_rate) or 0,
                hh_inc_10_20=float(feature.hh * census_rates_feature.hh_inc_10_20_rate) or 0,
                hh_inc_20_30=float(feature.hh * census_rates_feature.hh_inc_20_30_rate) or 0,
                hh_inc_30_40=float(feature.hh * census_rates_feature.hh_inc_30_40_rate) or 0,
                hh_inc_40_50=float(feature.hh * census_rates_feature.hh_inc_40_50_rate) or 0,
                hh_inc_50_60=float(feature.hh * census_rates_feature.hh_inc_50_60_rate) or 0,
                hh_inc_60_75=float(feature.hh * census_rates_feature.hh_inc_60_75_rate) or 0,
                hh_inc_75_100=float(feature.hh * census_rates_feature.hh_inc_75_100_rate) or 0,
                hh_inc_100p=float(feature.hh * (census_rates_feature.hh_inc_100_125_rate +
                                                census_rates_feature.hh_inc_125_150_rate +
                                                census_rates_feature.hh_inc_150_200_rate +
                                                census_rates_feature.hh_inc_200p_rate)) or 0,
                pop=float(feature.pop) or 0,
                pop_employed=float(feature.pop * census_rates_feature.pop_age16_up_rate *
                                   census_rates_feature.pop_employed_rate) or 0,
                pop_age16_up=float(feature.pop * census_rates_feature.pop_age16_up_rate) or 0,
                pop_age65_up=float(feature.pop * census_rates_feature.pop_age65_up_rate) or 0,
                emp=float(feature.emp) or 0,
                emp_retail=float(feature.emp_retail_services + feature.emp_other_services) or 0,
                emp_restaccom=float(feature.emp_accommodation + feature.emp_restaurant) or 0,
                emp_arts_entertainment=float(feature.emp_arts_entertainment) or 0,
                emp_office=float(feature.emp_off) or 0,
                emp_public=float(feature.emp_public_admin + feature.emp_education) or 0,
                emp_industry=float(feature.emp_ind + feature.emp_ag) or 0,
                emp_within_1mile=float(vmt_variables_feature.emp_1mile) or 0,
                hh_within_quarter_mile_trans=1 if vmt_variables_feature.transit_1km > 0 else 0,
                vb_acres_parcel_res_total=float(vmt_variables_feature.acres_parcel_res_vb) or 0,
                vb_acres_parcel_emp_total=float(vmt_variables_feature.acres_parcel_emp_vb) or 0,
                vb_acres_parcel_mixed_total=float(vmt_variables_feature.acres_parcel_mixed_use_vb) or 0,
                vb_du_total=float(vmt_variables_feature.du_vb) or 0,
                vb_pop_total=float(vmt_variables_feature.pop_vb) or 0,
                vb_emp_total=float(vmt_variables_feature.emp_vb) or 0,
                vb_emp_retail_total=float(vmt_variables_feature.emp_ret_vb) or 0,
                vb_hh_total=float(vmt_variables_feature.hh_vb) or 0,
                vb_du_mf_total=float(vmt_variables_feature.du_mf_vb) or 0,
                vb_hh_inc_00_10_total=float(vmt_variables_feature.hh_inc_00_10_vb) or 0,
                vb_hh_inc_10_20_total=float(vmt_variables_feature.hh_inc_10_20_vb) or 0,
                vb_hh_inc_20_30_total=float(vmt_variables_feature.hh_inc_20_30_vb) or 0,
                vb_hh_inc_30_40_total=float(vmt_variables_feature.hh_inc_30_40_vb) or 0,
                vb_hh_inc_40_50_total=float(vmt_variables_feature.hh_inc_40_50_vb) or 0,
                vb_hh_inc_50_60_total=float(vmt_variables_feature.hh_inc_50_60_vb) or 0,
                vb_hh_inc_60_75_total=float(vmt_variables_feature.hh_inc_60_75_vb) or 0,
                vb_hh_inc_75_100_total=float(vmt_variables_feature.hh_inc_75_100_vb) or 0,
                vb_hh_inc_100p_total=float(vmt_variables_feature.hh_inc_100p_vb) or 0,
                vb_pop_employed_total=float(vmt_variables_feature.pop_employed_vb) or 0,
                vb_pop_age16_up_total=float(vmt_variables_feature.pop_age16_up_vb) or 0,
                vb_pop_age65_up_total=float(vmt_variables_feature.pop_age65_up_vb) or 0,
                emp30m_transit=float(trip_lengths_feature.emp_30min_transit) or 0,
                emp45m_transit=float(trip_lengths_feature.emp_45min_transit) or 0,
                prod_hbw=float(trip_lengths_feature.productions_hbw) or 0,
                prod_hbo=float(trip_lengths_feature.productions_hbo) or 0,
                prod_nhb=float(trip_lengths_feature.productions_nhb) or 0,
                attr_hbw=float(trip_lengths_feature.attractions_hbw) or 0,
                attr_hbo=float(trip_lengths_feature.attractions_hbo) or 0,
                attr_nhb=float(trip_lengths_feature.attractions_nhb) or 0,
                qmb_acres_parcel_res_total=float(vmt_variables_feature.acres_parcel_res_qtrmi) or 0,
                qmb_acres_parcel_emp_total=float(vmt_variables_feature.acres_parcel_emp_qtrmi) or 0,
                qmb_acres_parcel_mixed_total=float(vmt_variables_feature.acres_parcel_mixed_use_qtrmi) or 0,
                qmb_du_total=float(vmt_variables_feature.du_qtrmi) or 0,
                qmb_pop_total=float(vmt_variables_feature.pop_qtrmi) or 0,
                qmb_emp_total=float(vmt_variables_feature.emp_qtrmi) or 0,
                qmb_emp_retail=float(vmt_variables_feature.emp_ret_qtrmi) or 0,
                hh_avg_veh=float(census_rates_feature.hh_agg_veh_rate) or 0,
                truck_adjustment_factor=0.031,
                total_employment=float(total_employment['emp__sum']) or 0)
            # run raw trip generation
            vmt_feature_trips = generate_raw_trips(vmt_feature)
            # run trip purpose splits
            vmt_feature_trip_purposes = calculate_trip_purpose_splits(vmt_feature_trips)
            # run log odds
            vmt_feature_log_odds = calculate_log_odds(vmt_feature_trip_purposes)
            # run vmt equations
            vmt_output = calculate_final_vmt_results(vmt_feature_log_odds)
            # filters the vmt feature dictionary for specific output fields for writing to the database
            output_list = map(lambda key: vmt_output[key], vmt_output_field_list)
            vmt_output_list.append(output_list)
def update(self, **kwargs):
    """
    Run the VMT model: (re)create the VMT result table, then compute and
    collect VMT outputs for every canvas feature with dwelling units or
    employment, processed in id-range batches to bound memory use.
    """
    # Make sure all related models have been created before querying
    logger.info("Executing Vmt using {0}".format(self.config_entity))
    self.vmt_progress(0.1, **kwargs)
    vmt_result_class = self.config_entity.db_entity_feature_class(
        DbEntityKey.VMT)
    vmt_variables_feature_class = self.config_entity.db_entity_feature_class(
        DbEntityKey.VMT_VARIABLES)
    census_rates_feature_class = self.config_entity.db_entity_feature_class(
        DbEntityKey.CENSUS_RATES)
    # Future scenarios read from the end-state canvas and future trip
    # lengths/transit stops; otherwise the base-year equivalents are used.
    if isinstance(self.config_entity.subclassed, FutureScenario):
        scenario_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.END_STATE)
        trip_lengths_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.VMT_FUTURE_TRIP_LENGTHS)
        transit_stop_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.FUTURE_TRANSIT_STOPS)
        is_future = True
    else:
        scenario_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.BASE_CANVAS)
        trip_lengths_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.VMT_BASE_TRIP_LENGTHS)
        transit_stop_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.BASE_TRANSIT_STOPS)
        is_future = False
    # Schema/table names interpolated into the raw SQL below.
    sql_config_dict = dict(
        vmt_result_table=vmt_result_class.db_entity_key,
        vmt_schema=parse_schema_and_table(
            vmt_result_class._meta.db_table)[0],
        uf_canvas_table=scenario_class.db_entity_key,
        uf_canvas_schema=parse_schema_and_table(
            scenario_class._meta.db_table)[0],
        census_rates_table=census_rates_feature_class.db_entity_key,
        census_rates_schema=parse_schema_and_table(
            census_rates_feature_class._meta.db_table)[0],
        trip_lengths_table=trip_lengths_class.db_entity_key,
        trip_lengths_schema=parse_schema_and_table(
            trip_lengths_class._meta.db_table)[0],
        vmt_variables_table=vmt_variables_feature_class.db_entity_key,
        vmt_variables_schema=parse_schema_and_table(
            vmt_variables_feature_class._meta.db_table)[0],
        vmt_rel_table=parse_schema_and_table(
            vmt_result_class._meta.db_table)[1],
        vmt_rel_column=vmt_result_class._meta.parents.values()[0].column,
        transit_stop_schema=parse_schema_and_table(
            transit_stop_class._meta.db_table)[0],
        transit_stop_table=transit_stop_class.db_entity_key,
        config_entity=self.config_entity)
    # if not kwargs.get('postprocess_only'):
    self.run_vmt_preprocesses(sql_config_dict, **kwargs)
    # Rebuild the result table from scratch and clear the rel table.
    drop_table('{vmt_schema}.{vmt_result_table}'.format(**sql_config_dict))
    truncate_table(
        '{vmt_schema}.{vmt_rel_table}'.format(**sql_config_dict))
    # All output columns except id become numeric(14, 4).
    attribute_list = filter(lambda x: x != 'id', vmt_output_field_list)
    output_field_syntax = 'id int, ' + create_sql_calculations(
        attribute_list, '{0} numeric(14, 4)')
    pSql = '''
    create table {vmt_schema}.{vmt_result_table} ({output_field_syntax});'''.format(
        output_field_syntax=output_field_syntax, **sql_config_dict)
    execute_sql(pSql)
    trip_lengths = DbEntityKey.VMT_FUTURE_TRIP_LENGTHS if is_future else DbEntityKey.VMT_BASE_TRIP_LENGTHS
    total_employment = scenario_class.objects.aggregate(Sum('emp'))
    all_features = scenario_class.objects.filter(
        Q(du__gt=0) | Q(emp__gt=0))
    all_features_length = len(all_features)
    max_id = scenario_class.objects.all().order_by("-id")[0].id
    min_id = scenario_class.objects.all().order_by("id")[0].id
    # This section of the model passes data from POSTGRES into Python and is saved in memory before being committed
    # back to the database. In order to not use all memory with large datasets, jobs are broken up with a maximum
    # job size of JOB_SIZE rows before being committed to the database. It will iterate through until all rows are
    # calculated and committed.
    # NOTE(review): Python 2 integer division is relied on here for
    # job_count/rows_per_range -- confirm before any py3 migration.
    if all_features_length > self.JOB_SIZE:
        job_count = all_features_length / self.JOB_SIZE
        rows_per_range = (max_id - min_id) / job_count
    else:
        rows_per_range = max_id - min_id
        job_count = 1
    print 'Job Count: {0}'.format(job_count)
    start_id = min_id
    for i in range(job_count):
        # Last job absorbs any remainder of the id range.
        if i == job_count - 1:
            end_id = max_id
        else:
            end_id = start_id + rows_per_range - 1
        logger.info('Job: {0}'.format(i))
        logger.info('Start Id: {0}'.format(start_id))
        logger.info('End Id: {0}'.format(end_id))
        vmt_output_list = []
        features = all_features.filter(id__range=(start_id, end_id))
        annotated_features = annotated_related_feature_class_pk_via_geographies(
            features, self.config_entity, [
                DbEntityKey.VMT_VARIABLES, DbEntityKey.CENSUS_RATES,
                DbEntityKey.VMT_FUTURE_TRIP_LENGTHS,
                DbEntityKey.VMT_BASE_TRIP_LENGTHS, trip_lengths
            ])
        assert annotated_features.exists(
        ), "VMT is about to process 0 results"
        # NOTE(review): failed_features is collected but never consumed in
        # the code visible here.
        failed_features = []
        for feature in annotated_features:
            trip_length_id = feature.vmt_future_trip_lengths if is_future else feature.vmt_base_trip_lengths
            try:
                trip_lengths_feature = trip_lengths_class.objects.get(
                    id=trip_length_id)
            except trip_lengths_class.DoesNotExist, e:
                failed_features.append(feature)
                logger.error(
                    'Cannot find trip lengths for geography with id = {0}'.
                    format(feature.id))
                continue
            vmt_variables_feature = vmt_variables_feature_class.objects.get(
                id=feature.vmt_variables)
            try:
                census_rates_feature = census_rates_feature_class.objects.get(
                    id=feature.census_rates)
            except census_rates_feature_class.DoesNotExist, e:
                logger.error(
                    'Cannot find census rate with id = {0}'.format(
                        feature.census_rates))
                continue
            # Flatten all model inputs into one dict keyed the way the VMT
            # equations expect. The `float(x) or 0` idiom turns 0.0 into int
            # 0; NOTE(review): it raises TypeError if a field is None --
            # confirm fields are non-nullable.
            vmt_feature = dict(
                id=int(feature.id),
                acres_gross=float(feature.acres_gross) or 0,
                acres_parcel=float(feature.acres_parcel) or 0,
                acres_parcel_res=float(feature.acres_parcel_res) or 0,
                acres_parcel_emp=float(feature.acres_parcel_emp) or 0,
                acres_parcel_mixed=float(feature.acres_parcel_mixed_use) or 0,
                intersections_qtrmi=float(
                    feature.intersection_density_sqmi) or 0,
                du=float(feature.du) or 0,
                du_occupancy_rate=float(feature.hh / feature.du
                                        if feature.du else 0),
                du_detsf=float(feature.du_detsf) or 0,
                du_attsf=float(feature.du_attsf) or 0,
                du_mf=float(feature.du_mf) or 0,
                du_mf2to4=float(feature.du_mf2to4) or 0,
                du_mf5p=float(feature.du_mf5p) or 0,
                hh=float(feature.hh) or 0,
                hh_avg_size=float(feature.pop / feature.hh
                                  if feature.hh > 0 else 0),
                hh_avg_inc=float(census_rates_feature.hh_agg_inc_rate) or 0,
                hh_inc_00_10=float(
                    feature.hh * census_rates_feature.hh_inc_00_10_rate) or 0,
                hh_inc_10_20=float(
                    feature.hh * census_rates_feature.hh_inc_10_20_rate) or 0,
                hh_inc_20_30=float(
                    feature.hh * census_rates_feature.hh_inc_20_30_rate) or 0,
                hh_inc_30_40=float(
                    feature.hh * census_rates_feature.hh_inc_30_40_rate) or 0,
                hh_inc_40_50=float(
                    feature.hh * census_rates_feature.hh_inc_40_50_rate) or 0,
                hh_inc_50_60=float(
                    feature.hh * census_rates_feature.hh_inc_50_60_rate) or 0,
                hh_inc_60_75=float(
                    feature.hh * census_rates_feature.hh_inc_60_75_rate) or 0,
                hh_inc_75_100=float(
                    feature.hh * census_rates_feature.hh_inc_75_100_rate) or 0,
                hh_inc_100p=float(
                    feature.hh *
                    (census_rates_feature.hh_inc_100_125_rate +
                     census_rates_feature.hh_inc_125_150_rate +
                     census_rates_feature.hh_inc_150_200_rate +
                     census_rates_feature.hh_inc_200p_rate)) or 0,
                pop=float(feature.pop) or 0,
                pop_employed=float(
                    feature.pop * census_rates_feature.pop_age16_up_rate *
                    census_rates_feature.pop_employed_rate) or 0,
                pop_age16_up=float(
                    feature.pop * census_rates_feature.pop_age16_up_rate) or 0,
                pop_age65_up=float(
                    feature.pop * census_rates_feature.pop_age65_up_rate) or 0,
                emp=float(feature.emp) or 0,
                emp_retail=float(feature.emp_retail_services +
                                 feature.emp_other_services) or 0,
                emp_restaccom=float(feature.emp_accommodation +
                                    feature.emp_restaurant) or 0,
                emp_arts_entertainment=float(
                    feature.emp_arts_entertainment) or 0,
                emp_office=float(feature.emp_off) or 0,
                emp_public=float(feature.emp_public_admin +
                                 feature.emp_education) or 0,
                emp_industry=float(feature.emp_ind + feature.emp_ag) or 0,
                emp_within_1mile=float(vmt_variables_feature.emp_1mile) or 0,
                hh_within_quarter_mile_trans=1
                if vmt_variables_feature.transit_1km > 0 else 0,
                vb_acres_parcel_res_total=float(
                    vmt_variables_feature.acres_parcel_res_vb) or 0,
                vb_acres_parcel_emp_total=float(
                    vmt_variables_feature.acres_parcel_emp_vb) or 0,
                vb_acres_parcel_mixed_total=float(
                    vmt_variables_feature.acres_parcel_mixed_use_vb) or 0,
                vb_du_total=float(vmt_variables_feature.du_vb) or 0,
                vb_pop_total=float(vmt_variables_feature.pop_vb) or 0,
                vb_emp_total=float(vmt_variables_feature.emp_vb) or 0,
                vb_emp_retail_total=float(vmt_variables_feature.emp_ret_vb)
                or 0,
                vb_hh_total=float(vmt_variables_feature.hh_vb) or 0,
                vb_du_mf_total=float(vmt_variables_feature.du_mf_vb) or 0,
                vb_hh_inc_00_10_total=float(
                    vmt_variables_feature.hh_inc_00_10_vb) or 0,
                vb_hh_inc_10_20_total=float(
                    vmt_variables_feature.hh_inc_10_20_vb) or 0,
                vb_hh_inc_20_30_total=float(
                    vmt_variables_feature.hh_inc_20_30_vb) or 0,
                vb_hh_inc_30_40_total=float(
                    vmt_variables_feature.hh_inc_30_40_vb) or 0,
                vb_hh_inc_40_50_total=float(
                    vmt_variables_feature.hh_inc_40_50_vb) or 0,
                vb_hh_inc_50_60_total=float(
                    vmt_variables_feature.hh_inc_50_60_vb) or 0,
                vb_hh_inc_60_75_total=float(
                    vmt_variables_feature.hh_inc_60_75_vb) or 0,
                vb_hh_inc_75_100_total=float(
                    vmt_variables_feature.hh_inc_75_100_vb) or 0,
                vb_hh_inc_100p_total=float(
                    vmt_variables_feature.hh_inc_100p_vb) or 0,
                vb_pop_employed_total=float(
                    vmt_variables_feature.pop_employed_vb) or 0,
                vb_pop_age16_up_total=float(
                    vmt_variables_feature.pop_age16_up_vb) or 0,
                vb_pop_age65_up_total=float(
                    vmt_variables_feature.pop_age65_up_vb) or 0,
                emp30m_transit=float(
                    trip_lengths_feature.emp_30min_transit) or 0,
                emp45m_transit=float(
                    trip_lengths_feature.emp_45min_transit) or 0,
                prod_hbw=float(trip_lengths_feature.productions_hbw) or 0,
                prod_hbo=float(trip_lengths_feature.productions_hbo) or 0,
                prod_nhb=float(trip_lengths_feature.productions_nhb) or 0,
                attr_hbw=float(trip_lengths_feature.attractions_hbw) or 0,
                attr_hbo=float(trip_lengths_feature.attractions_hbo) or 0,
                attr_nhb=float(trip_lengths_feature.attractions_nhb) or 0,
                qmb_acres_parcel_res_total=float(
                    vmt_variables_feature.acres_parcel_res_qtrmi) or 0,
                qmb_acres_parcel_emp_total=float(
                    vmt_variables_feature.acres_parcel_emp_qtrmi) or 0,
                qmb_acres_parcel_mixed_total=float(
                    vmt_variables_feature.acres_parcel_mixed_use_qtrmi) or 0,
                qmb_du_total=float(vmt_variables_feature.du_qtrmi) or 0,
                qmb_pop_total=float(vmt_variables_feature.pop_qtrmi) or 0,
                qmb_emp_total=float(vmt_variables_feature.emp_qtrmi) or 0,
                qmb_emp_retail=float(vmt_variables_feature.emp_ret_qtrmi)
                or 0,
                hh_avg_veh=float(census_rates_feature.hh_agg_veh_rate) or 0,
                truck_adjustment_factor=0.031,
                total_employment=float(total_employment['emp__sum']) or 0)
            # run raw trip generation
            vmt_feature_trips = generate_raw_trips(vmt_feature)
            # run trip purpose splits
            vmt_feature_trip_purposes = calculate_trip_purpose_splits(
                vmt_feature_trips)
            # run log odds
            vmt_feature_log_odds = calculate_log_odds(
                vmt_feature_trip_purposes)
            # run vmt equations
            vmt_output = calculate_final_vmt_results(vmt_feature_log_odds)
            # filters the vmt feature dictionary for specific output fields for writing to the database
            output_list = map(lambda key: vmt_output[key],
                              vmt_output_field_list)
            vmt_output_list.append(output_list)
def run_future_water_calculations(self, **kwargs):
    """
    Compute future-year water use for every end-state feature with dwelling
    units or employment.

    Returns (water_output_list, options) where each row of water_output_list
    holds the values of self.output_fields for one feature and options names
    the result/base tables and schemas.

    Fix: the residential-factor guards previously checked only hh, so a
    feature with hh > 0 but du == 0 raised ZeroDivisionError; du is now
    guarded as well (the queryset filter admits du == 0 rows via emp > 0).
    """
    # Horizon over which efficiencies are annualized.
    self.base_year = self.config_entity.scenario.project.base_year
    self.future_year = self.config_entity.scenario.year
    self.increment = self.future_year - self.base_year
    self.annualize_efficiencies()

    features = self.end_state_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
    # Attach related BaseCanvas and ClimateZones pks via the geography tables.
    annotated_features = annotated_related_feature_class_pk_via_geographies(
        features, self.config_entity,
        [DbEntityKey.BASE_CANVAS, DbEntityKey.CLIMATE_ZONES])
    water_output_list = []
    options = dict(
        water_result_table=self.water_class.db_entity_key,
        water_schema=parse_schema_and_table(self.water_class._meta.db_table)[0],
        base_table=self.base_class.db_entity_key,
        base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
    )
    # Report progress roughly every 1/14th of the rows (querysets > 30 rows).
    approx_fifth = int(annotated_features.count() / 14 - 1) if annotated_features.count() > 30 else 1
    i = 1
    for feature in annotated_features.iterator():
        self.feature = feature
        # Missing keys default to 0.0 when output rows are assembled below.
        self.result_dict = defaultdict(lambda: float(0))
        if i % approx_fifth == 0:
            self.report_progress(0.05, **kwargs)
        base_feature = self.base_class.objects.get(id=feature.base_canvas)
        if feature.climate_zones:
            climate_zone_feature = self.climate_zone_class.objects.get(id=feature.climate_zones)
        else:
            logger.warn("No Climate Zone intersection for feature id {0} or check geography relation table".format(feature.id))
            continue
        self.feature_dict = dict(
            id=feature.id,
            pop=float(feature.pop),
            hh=float(feature.hh),
            emp=float(feature.emp),
            evapotranspiration_zone=climate_zone_feature.evapotranspiration_zone.zone,
            annual_evapotranspiration=float(climate_zone_feature.evapotranspiration_zone.annual_evapotranspiration),
        )
        # Irrigated square footage split into redeveloped/new/base components.
        for use in 'residential', 'commercial':
            key = "{use}_irrigated_sqft".format(use=use)
            self.feature_dict.update({
                key + "_redev": self.redev_units(key, feature, base_feature),
                key + "_new": self.new_units(key, feature, base_feature),
                key + "_base": float(getattr(base_feature, key))
            })
        # Persons per dwelling unit (hh/du * pop/hh). BUG FIX: also require
        # du to be nonzero -- previously hh > 0 with du == 0 divided by zero.
        future_residential_factor = (
            feature.hh / feature.du * feature.pop / feature.hh
            if feature.hh > 0 and feature.du else 0)
        base_residential_factor = (
            base_feature.hh / base_feature.du * base_feature.pop / base_feature.hh
            if base_feature.hh and base_feature.du else 0)
        for key in self.RESIDENTIAL_TYPES:
            self.feature_dict.update({
                key + "_redev": self.redev_units(key, feature, base_feature) * float(base_residential_factor),
                key + "_new": self.new_units(key, feature, base_feature) * float(future_residential_factor),
                key + "_base": float(getattr(base_feature, key)) * float(base_residential_factor)
            })
        # NOTE(review): commercial employment is scaled by the *residential*
        # occupancy factors -- confirm this is intended.
        for key in self.COMMERCIAL_TYPES:
            self.feature_dict.update({
                key + "_redev": self.redev_units("emp_" + key, feature, base_feature) * float(base_residential_factor),
                key + "_new": self.new_units("emp_" + key, feature, base_feature) * float(future_residential_factor),
                key + "_base": float(getattr(base_feature, "emp_" + key)) * float(base_residential_factor)
            })
        self.calculate_future_water()
        self.calculate_visualized_field()
        output_row = map(lambda key: self.result_dict.get(key), self.output_fields)
        water_output_list.append(output_row)
        i += 1
    return water_output_list, options
def run_base_water_calculations(self):
    """Build per-feature base-year water inputs and collect output rows.

    Walks every base-canvas feature carrying dwelling units or employment,
    joins it to its climate zone, assembles ``self.feature_dict`` (dwelling
    unit counts scaled to population via the occupancy factor, employment
    categories passed through), then defers the demand computation to
    ``calculate_base_water()`` and ``calculate_visualized_field()``.

    Returns:
        (water_output_list, options): one mapped row per feature keyed by
        ``self.output_fields``, plus the result/base table options dict.
    """
    features = self.base_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
    annotated_features = annotated_related_feature_class_pk_via_geographies(
        features, self.config_entity, [DbEntityKey.CLIMATE_ZONES])

    water_output_list = []
    options = dict(
        water_result_table=self.water_class.db_entity_key,
        water_schema=parse_schema_and_table(self.water_class._meta.db_table)[0],
        base_table=self.base_class.db_entity_key,
        base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
    )

    for feature in annotated_features.iterator():
        self.result_dict = defaultdict(lambda: float(0))
        self.feature = feature

        # Guard clause: skip features with no climate-zone intersection.
        if not feature.climate_zones:
            logger.warn("No Climate Zone intersection for feature id {0} or check geography relation table".format(feature.id))
            continue
        climate_zone = self.climate_zone_class.objects.get(id=feature.climate_zones)

        # Occupancy factor (pop/hh) * (hh/du); zero when either count is zero.
        occupancy = ((feature.pop / feature.hh) * (feature.hh / feature.du)
                     if (feature.du > 0 and feature.hh > 0) else 0)

        et_zone = climate_zone.evapotranspiration_zone
        self.feature_dict = {
            'id': feature.id,
            'pop': float(feature.pop),
            'hh': float(feature.hh),
            'emp': float(feature.emp),
            'evapotranspiration_zone': et_zone.zone,
            'annual_evapotranspiration': float(et_zone.annual_evapotranspiration),
            'residential_irrigated_sqft': float(feature.residential_irrigated_sqft),
            'commercial_irrigated_sqft': float(feature.commercial_irrigated_sqft),
        }
        # Dwelling-unit categories, scaled by the occupancy factor.
        for du_field in ('du_detsf_ll', 'du_detsf_sl', 'du_attsf', 'du_mf'):
            self.feature_dict[du_field] = float(getattr(feature, du_field) * occupancy)
        # Employment categories pass through unscaled (emp_ prefix stripped).
        for emp_field in ('retail_services', 'restaurant', 'accommodation',
                          'arts_entertainment', 'other_services',
                          'office_services', 'public_admin', 'education',
                          'medical_services', 'wholesale',
                          'transport_warehousing', 'manufacturing',
                          'construction', 'utilities', 'agriculture',
                          'extraction', 'military'):
            self.feature_dict[emp_field] = float(getattr(feature, 'emp_' + emp_field))

        self.calculate_base_water()
        self.calculate_visualized_field()

        water_output_list.append(
            map(lambda key: self.result_dict[key], self.output_fields))

    return water_output_list, options
def run_future_water_calculations(self, **kwargs):
    """Compute per-feature future-year water inputs for the scenario.

    Iterates end-state features with any dwelling units or employment,
    joins each to its base-canvas feature and climate zone, fills
    ``self.feature_dict``, and delegates the demand math to
    ``calculate_future_water()`` / ``calculate_visualized_field()``.

    Returns:
        (water_output_list, options): one output row per feature (ordered
        by ``self.output_fields``) plus the table/schema options dict.

    Fix vs. original: the residential occupancy factors divided by
    ``feature.du`` / ``base_feature.du`` but guarded only ``hh``; because
    the filter ``Q(du__gt=0) | Q(emp__gt=0)`` admits features with
    ``du == 0``, that raised ZeroDivisionError. Both factors now also
    require ``du > 0``, matching run_base_water_calculations.
    """
    self.base_year = self.config_entity.scenario.project.base_year
    self.future_year = self.config_entity.scenario.year
    self.increment = self.future_year - self.base_year
    self.annualize_efficiencies()

    features = self.end_state_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
    annotated_features = annotated_related_feature_class_pk_via_geographies(
        features, self.config_entity,
        [DbEntityKey.BASE_CANVAS, DbEntityKey.CLIMATE_ZONES])

    water_output_list = []
    options = dict(
        water_result_table=self.water_class.db_entity_key,
        water_schema=parse_schema_and_table(self.water_class._meta.db_table)[0],
        base_table=self.base_class.db_entity_key,
        base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
    )

    # Progress interval: ~every count/14 rows; name is historical.
    approx_fifth = int(annotated_features.count() / 14 - 1) \
        if annotated_features.count() > 30 else 1

    i = 1
    for feature in annotated_features.iterator():
        self.feature = feature
        self.result_dict = defaultdict(lambda: float(0))

        if i % approx_fifth == 0:
            self.report_progress(0.05, **kwargs)

        base_feature = self.base_class.objects.get(id=feature.base_canvas)
        if feature.climate_zones:
            climate_zone_feature = self.climate_zone_class.objects.get(
                id=feature.climate_zones)
        else:
            # Missing climate-zone join: skip rather than crash.
            logger.warning("No Climate Zone intersection for feature id {0} or check geography relation table".format(feature.id))
            continue

        self.feature_dict = dict(
            id=feature.id,
            pop=float(feature.pop),
            hh=float(feature.hh),
            emp=float(feature.emp),
            evapotranspiration_zone=climate_zone_feature.evapotranspiration_zone.zone,
            annual_evapotranspiration=float(
                climate_zone_feature.evapotranspiration_zone.annual_evapotranspiration),
        )

        for use in 'residential', 'commercial':
            key = "{use}_irrigated_sqft".format(use=use)
            self.feature_dict.update({
                key + "_redev": self.redev_units(key, feature, base_feature),
                key + "_new": self.new_units(key, feature, base_feature),
                key + "_base": float(getattr(base_feature, key))
            })

        # Persons-per-dwelling-unit factors (hh/du * pop/hh). Guard du as
        # well as hh: du == 0 with hh > 0 previously raised ZeroDivisionError.
        future_residential_factor = (
            feature.hh / feature.du * feature.pop / feature.hh
            if feature.du > 0 and feature.hh > 0 else 0)
        base_residential_factor = (
            base_feature.hh / base_feature.du * base_feature.pop / base_feature.hh
            if base_feature.du > 0 and base_feature.hh else 0)

        for key in self.RESIDENTIAL_TYPES:
            self.feature_dict.update({
                key + "_redev": self.redev_units(key, feature, base_feature) * float(base_residential_factor),
                key + "_new": self.new_units(key, feature, base_feature) * float(future_residential_factor),
                key + "_base": float(getattr(base_feature, key)) * float(base_residential_factor)
            })

        # NOTE(review): commercial categories are scaled by the *residential*
        # occupancy factors — looks suspect; confirm before changing.
        for key in self.COMMERCIAL_TYPES:
            self.feature_dict.update({
                key + "_redev": self.redev_units("emp_" + key, feature, base_feature) * float(base_residential_factor),
                key + "_new": self.new_units("emp_" + key, feature, base_feature) * float(future_residential_factor),
                key + "_base": float(getattr(base_feature, "emp_" + key)) * float(base_residential_factor)
            })

        self.calculate_future_water()
        self.calculate_visualized_field()

        output_row = map(lambda key: self.result_dict.get(key), self.output_fields)
        water_output_list.append(output_row)
        i += 1

    return water_output_list, options
def run_base_water_calculations(self):
    """Assemble base-year water inputs per feature and return output rows.

    For every base-canvas feature with dwelling units or employment, looks
    up its climate zone, populates ``self.feature_dict`` (residential unit
    counts scaled by an occupancy factor, employment counts copied through
    with the ``emp_`` prefix stripped), then hands off to
    ``calculate_base_water()`` and ``calculate_visualized_field()``.

    Returns:
        (water_output_list, options): mapped rows keyed by
        ``self.output_fields`` and the result/base table options dict.
    """
    features = self.base_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
    annotated_features = annotated_related_feature_class_pk_via_geographies(
        features, self.config_entity, [DbEntityKey.CLIMATE_ZONES])

    water_output_list = []
    options = dict(
        water_result_table=self.water_class.db_entity_key,
        water_schema=parse_schema_and_table(self.water_class._meta.db_table)[0],
        base_table=self.base_class.db_entity_key,
        base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
    )

    for feature in annotated_features.iterator():
        self.result_dict = defaultdict(lambda: float(0))
        self.feature = feature

        # Skip features lacking a climate-zone intersection (guard clause).
        if not feature.climate_zones:
            logger.warn("No Climate Zone intersection for feature id {0} or check geography relation table".format(feature.id))
            continue
        climate_zone = self.climate_zone_class.objects.get(id=feature.climate_zones)

        # Occupancy factor (pop/hh)*(hh/du); zero when du or hh is zero.
        occupancy = ((feature.pop / feature.hh) * (feature.hh / feature.du)
                     if (feature.du > 0 and feature.hh > 0) else 0)

        et_zone = climate_zone.evapotranspiration_zone
        row = {
            'id': feature.id,
            'pop': float(feature.pop),
            'hh': float(feature.hh),
            'emp': float(feature.emp),
            'evapotranspiration_zone': et_zone.zone,
            'annual_evapotranspiration': float(et_zone.annual_evapotranspiration),
            'residential_irrigated_sqft': float(feature.residential_irrigated_sqft),
            'commercial_irrigated_sqft': float(feature.commercial_irrigated_sqft),
        }
        # Scale each dwelling-unit category to its occupied population.
        for du_key in ('du_detsf_ll', 'du_detsf_sl', 'du_attsf', 'du_mf'):
            row[du_key] = float(getattr(feature, du_key) * occupancy)
        # Copy employment categories through, dropping the emp_ prefix.
        for emp_key in ('retail_services', 'restaurant', 'accommodation',
                        'arts_entertainment', 'other_services',
                        'office_services', 'public_admin', 'education',
                        'medical_services', 'wholesale',
                        'transport_warehousing', 'manufacturing',
                        'construction', 'utilities', 'agriculture',
                        'extraction', 'military'):
            row[emp_key] = float(getattr(feature, 'emp_' + emp_key))
        self.feature_dict = row

        self.calculate_base_water()
        self.calculate_visualized_field()

        water_output_list.append(
            map(lambda key: self.result_dict[key], self.output_fields))

    return water_output_list, options