Пример #1
0
    def aggregate_results_to_outcomes_summary_table(self, sql_config_dict):
        """
        Aggregates the result table (at grid scale) to a standardized summary table.

        Demographic count columns are summed directly; every other outcome field
        is averaged weighted by grid-cell population.  One row per outcome column
        is then inserted into the summary table along with the UD4H model source
        that produced it.
        """

        # Columns that are plain sums rather than population-weighted averages.
        demographic_columns = ['id', 'hh', 'pop', 'pop_adult', 'pop_adult_high',
                               'pop_adult_med', 'pop_adult_low', 'pop_senior',
                               'pop_teen', 'pop_children']

        attribute_list = filter(lambda x: x not in demographic_columns,
                                self.outcome_fields)
        output_list = filter(lambda x: x != 'id', self.outcome_fields)

        field_calculations = ''',
        '''.join([
            "case when SUM(pop) > 0 then SUM({field} * pop) / SUM(pop) else 0 end as {field}".format(
                field=field)
            for field in attribute_list
        ])

        truncate_table('{outcome_summary_schema}.{outcome_summary_table}'.format(**sql_config_dict))

        pSql = """
        SELECT
          SUM(hh) as hh,
          SUM(pop) as pop,
          SUM(pop_adult) as pop_adult,
          SUM(pop_adult_high) as pop_adult_high,
          SUM(pop_adult_med) as pop_adult_med,
          SUM(pop_adult_low) as pop_adult_low,
          SUM(pop_senior) as pop_senior,
          SUM(pop_teen) as pop_teen,
          SUM(pop_children) as pop_children,

          {field_calculations}

        FROM {grid_outcome_schema}.{grid_outcome_table};
        """.format(field_calculations=field_calculations, **sql_config_dict)

        summary_results = flatten(report_sql_values(pSql, 'fetchall'))

        # summary_results is ordered exactly like output_list (direct sums
        # first, then the weighted fields), so one shared index pairs each
        # column name with its computed value.
        for index_id, result in enumerate(output_list):
            model_name = result
            # Normalize "*_minutes" outcome names to the "*_min" model naming.
            if '_minutes' in model_name:
                model_name = model_name[:-8] + '_min'

            # Find which UD4H model produced this outcome; default to 'ref'.
            source = 'ref'
            for model in self.list_UD4H_models():
                ph_model_name = "".join(model[0]['name'].split())[:-4].lower()
                if ph_model_name == model_name:
                    source = model[0]['source']

            pSql = '''
            insert into {outcome_summary_schema}.{outcome_summary_table} values ({index_id}, '{column_name}', '{source}', {model_output});
            '''.format(index_id=index_id, column_name=str(result),
                       source=str(source),
                       model_output=summary_results[index_id],
                       **sql_config_dict)

            execute_sql(pSql)
Пример #2
0
def populate_public_health_grid_relation_table(sql_config_dict):
    """
    Build (once) the relation table that links each source grid cell to the
    uf canvas feature it overlaps and the census block group containing the
    cell centroid, along with the overlapped share of the canvas feature.
    """
    ph_schema = sql_config_dict['public_health_variables_schema']
    join_table = sql_config_dict['source_grid_table'] + '_join'

    # The spatial join is expensive; skip it when the table already exists.
    if table_exists(join_table, ph_schema):
        return

    # primary_proportion = share of the canvas polygon's area intersected by
    # the grid cell; the census block group is assigned by cell centroid.
    pSql = '''
        create table {public_health_variables_schema}.{source_grid_table}_join
            as select
                a.id as grid_id, b.id as primary_id, c.id as census_id,
                st_area(st_intersection(a.wkb_geometry, b.wkb_geometry)) / st_area(b.wkb_geometry) as primary_proportion
        from
             {source_grid_schema}.{source_grid_table} a,
             {uf_canvas_schema}.{uf_canvas_table} b,
             {census_rate_schema}.{census_rate_table} c
        where
            st_intersects(a.wkb_geometry, b.wkb_geometry) and
            st_intersects(st_centroid(a.wkb_geometry), c.wkb_geometry);
        '''.format(**sql_config_dict)

    execute_sql(pSql)

    # Index every join key used by the downstream aggregation queries.
    for index_column in ('grid_id', 'primary_id', 'census_id'):
        add_attribute_idx(ph_schema, join_table, index_column)
Пример #3
0
def populate_public_health_grid_relation_table(sql_config_dict):
    """
    Create, if absent, the relation table mapping each source grid cell to
    the uf canvas feature it intersects and the census block group that
    contains its centroid.
    """

    # Skip the expensive spatial join when the table was already built.
    join_exists = table_exists(sql_config_dict['source_grid_table'] + '_join',
                               sql_config_dict['public_health_variables_schema'])

    if not join_exists:

        # primary_proportion = fraction of the canvas feature's area covered
        # by the grid cell; census assignment is by grid-cell centroid.
        pSql = '''
        create table {public_health_variables_schema}.{source_grid_table}_join
            as select
                a.id as grid_id, b.id as primary_id, c.id as census_id,
                st_area(st_intersection(a.wkb_geometry, b.wkb_geometry)) / st_area(b.wkb_geometry) as primary_proportion
        from
             {source_grid_schema}.{source_grid_table} a,
             {uf_canvas_schema}.{uf_canvas_table} b,
             {census_rate_schema}.{census_rate_table} c
        where
            st_intersects(a.wkb_geometry, b.wkb_geometry) and
            st_intersects(st_centroid(a.wkb_geometry), c.wkb_geometry);
        '''.format(**sql_config_dict)

        execute_sql(pSql)

        # Index the three join keys used by downstream aggregation queries.
        add_attribute_idx(sql_config_dict['public_health_variables_schema'],
                          sql_config_dict['source_grid_table'] + '_join', 'grid_id')

        add_attribute_idx(sql_config_dict['public_health_variables_schema'],
                          sql_config_dict['source_grid_table'] + '_join', 'primary_id')

        add_attribute_idx(sql_config_dict['public_health_variables_schema'],
                          sql_config_dict['source_grid_table'] + '_join', 'census_id')
Пример #4
0
def run_vmt_one_mile_buffers(sql_config_dict):
    """
    Copy dwelling-unit and employment counts from the uf canvas onto the VMT
    variables table, then sum employment within one mile (1609 m) of each VMT
    variables feature into an `emp_1mile` column.
    """
    # Every participating table needs the projected analysis geometry column.
    for schema_key, table_key in (('uf_canvas_schema', 'uf_canvas_table'),
                                  ('vmt_variables_schema', 'vmt_variables_table'),
                                  ('transit_stop_schema', 'transit_stop_table')):
        add_analysis_geom(sql_config_dict[schema_key], sql_config_dict[table_key])

    copy_counts_sql = '''
    update {vmt_variables_schema}.{vmt_variables_table} a set du = b.du, emp = b.emp
    from (select id, du, emp from {uf_canvas_schema}.{uf_canvas_table}) b where a.id = b.id;
    '''.format(**sql_config_dict)

    execute_sql(copy_counts_sql)

    # 1609 m ~= one mile buffer around each target feature.
    aggregate_within_distance({
        'source_table': sql_config_dict['uf_canvas_schema'] + '.' + sql_config_dict['uf_canvas_table'],
        'source_table_query': 'emp > 0',
        'source_geometry_column': 'analysis_geom',
        'target_table_schema': sql_config_dict['vmt_variables_schema'],
        'target_table': sql_config_dict['vmt_variables_table'],
        'target_table_query': 'du + emp > 0',
        'target_geometry_column': 'analysis_geom',
        'target_table_pk': 'id',
        'distance': 1609,
        'suffix': 'one_mi',
        'aggregation_type': 'sum',
        'variable_field_dict': {'emp_1mile': ['emp']},
    })
Пример #5
0
def run_vmt_quarter_mile_buffers(sql_config_dict):
    """
    Sum canvas land-use, dwelling, population and employment measures within a
    quarter mile (403 m) of each VMT variables feature, then drop the analysis
    geometry (and its index) from the transit stop table.
    """
    # 403 m ~= quarter-mile buffer around each target feature.
    aggregate_within_distance({
        'source_table': sql_config_dict['uf_canvas_schema'] + '.' + sql_config_dict['uf_canvas_table'],
        'source_table_query': 'pop + emp > 0',
        'source_geometry_column': 'analysis_geom',
        'target_table_schema': sql_config_dict['vmt_variables_schema'],
        'target_table': sql_config_dict['vmt_variables_table'],
        'target_table_query': 'du + emp > 0',
        'target_geometry_column': 'analysis_geom',
        'target_table_pk': 'id',
        'distance': 403,
        'suffix': 'qtr_mi',
        'aggregation_type': 'sum',
        'variable_field_dict': {
            'acres_parcel_res_qtrmi': ['acres_parcel_res'],
            'acres_parcel_emp_qtrmi': ['acres_parcel_emp'],
            'acres_parcel_mixed_use_qtrmi': ['acres_parcel_mixed_use'],
            'du_qtrmi': ['du'],
            'pop_qtrmi': ['pop'],
            'emp_qtrmi': ['emp'],
            'emp_ret_qtrmi': ['emp_ret'],
        },
    })

    # The transit stop analysis geometry is no longer needed after buffering.
    cleanup_sql = '''DROP INDEX {schema}.{schema}_{table}_analysis_geom;
    Alter Table {schema}.{table} drop column analysis_geom;'''.format(
        schema=sql_config_dict['transit_stop_schema'],
        table=sql_config_dict['transit_stop_table'])
    execute_sql(cleanup_sql)
Пример #6
0
def run_vmt_one_mile_buffers(sql_config_dict):
    """
    Copy du/emp counts from the uf canvas onto the VMT variables table and sum
    employment within one mile (1609 m) of each feature into `emp_1mile`.
    """

    # All three tables need the projected analysis geometry column.
    add_analysis_geom(sql_config_dict['uf_canvas_schema'], sql_config_dict['uf_canvas_table'])
    add_analysis_geom(sql_config_dict['vmt_variables_schema'], sql_config_dict['vmt_variables_table'])
    add_analysis_geom(sql_config_dict['transit_stop_schema'], sql_config_dict['transit_stop_table'])

    # Refresh du/emp on the variables table from the canvas, matched by id.
    pSql = '''
    update {vmt_variables_schema}.{vmt_variables_table} a set du = b.du, emp = b.emp
    from (select id, du, emp from {uf_canvas_schema}.{uf_canvas_table}) b where a.id = b.id;
    '''.format(vmt_variables_schema=sql_config_dict['vmt_variables_schema'],
               vmt_variables_table=sql_config_dict['vmt_variables_table'],
               uf_canvas_schema=sql_config_dict['uf_canvas_schema'],
               uf_canvas_table=sql_config_dict['uf_canvas_table'])

    execute_sql(pSql)

    # 1609 m ~= one mile buffer around each target feature.
    aggregate_within_distance(dict(
        source_table=sql_config_dict['uf_canvas_schema'] + '.' + sql_config_dict['uf_canvas_table'],
        source_table_query='emp > 0',
        source_geometry_column='analysis_geom',
        target_table_schema=sql_config_dict['vmt_variables_schema'],
        target_table=sql_config_dict['vmt_variables_table'],
        target_table_query='du + emp > 0',
        target_geometry_column='analysis_geom',
        target_table_pk='id',
        distance=1609,
        suffix='one_mi',
        aggregation_type='sum',
        variable_field_dict=dict(
            emp_1mile=['emp'])
    ))
Пример #7
0
def run_vmt_quarter_mile_buffers(sql_config_dict):
    """
    Sum canvas land-use, dwelling, population and employment measures within a
    quarter mile (403 m) of each VMT variables feature, then drop the transit
    stop table's analysis geometry column and index.
    """

    # 403 m ~= quarter-mile buffer around each target feature.
    aggregate_within_distance(dict(
        source_table=sql_config_dict['uf_canvas_schema'] + '.' + sql_config_dict['uf_canvas_table'],
        source_table_query='pop + emp > 0',
        source_geometry_column='analysis_geom',
        target_table_schema=sql_config_dict['vmt_variables_schema'],
        target_table=sql_config_dict['vmt_variables_table'],
        target_table_query='du + emp > 0',
        target_geometry_column='analysis_geom',
        target_table_pk='id',
        distance=403,
        suffix='qtr_mi',
        aggregation_type='sum',
        variable_field_dict=dict(
            acres_parcel_res_qtrmi=['acres_parcel_res'],
            acres_parcel_emp_qtrmi=['acres_parcel_emp'],
            acres_parcel_mixed_use_qtrmi=['acres_parcel_mixed_use'],
            du_qtrmi=['du'],
            pop_qtrmi=['pop'],
            emp_qtrmi=['emp'],
            emp_ret_qtrmi=['emp_ret'])
    ))

    # The transit stop analysis geometry is no longer needed after buffering.
    pSql = '''DROP INDEX {schema}.{schema}_{table}_analysis_geom;
    Alter Table {schema}.{table} drop column analysis_geom;'''.format(schema=sql_config_dict['transit_stop_schema'],
                                                                   table=sql_config_dict['transit_stop_table'])
    execute_sql(pSql)
    def aggregate_results_to_block_group(self, sql_config_dict):
        """
        Aggregates the result table (at grid scale) to a Census Block Group result.

        Demographic counts are summed per block group; all other outcome fields
        are averaged weighted by grid-cell population.  Grid cells are mapped to
        block groups via the precomputed source-grid join table.
        """

        # Columns that are plain sums rather than population-weighted averages.
        demographic_columns = [
            'id', 'hh', 'pop', 'pop_adult', 'pop_adult_high', 'pop_adult_med',
            'pop_adult_low', 'pop_senior', 'pop_teen', 'pop_children']

        attribute_list = [field for field in self.outcome_fields
                          if field not in demographic_columns]

        field_calculations = ''',
        '''.join([
            "case when SUM(grid.pop) > 0 then SUM(grid.{field} * grid.pop) / SUM(grid.pop) else 0 end as {field}".format(
                field=field)
            for field in attribute_list
        ])

        # Rebuild the block-group result table from scratch.
        drop_table('{block_group_outcome_schema}.{block_group_outcome_table}'.format(**sql_config_dict))

        create_blockgroup_results = """
        CREATE TABLE {block_group_outcome_schema}.{block_group_outcome_table} AS SELECT

          bg.id as id,
          bg.blockgroup as blockgroup,
          bg.tract as tract,
          bg.county_name as county_name,

          SUM(grid.pop) as pop,
          SUM(grid.hh) as hh,
          SUM(grid.pop_adult) as pop_adult,

          SUM(grid.pop_adult_high) as pop_adult_high,
          SUM(grid.pop_adult_med) as pop_adult_med,
          SUM(grid.pop_adult_low) as pop_adult_low,

          SUM(grid.pop_senior) as pop_senior,
          SUM(grid.pop_teen) as pop_teen,
          SUM(grid.pop_children) as pop_children,

          {field_calculations},

          bg.wkb_geometry as wkb_geometry

        FROM (select grid_id, census_id from {public_health_variables_schema}.{source_grid_table}_join group by grid_id, census_id) grid_portions
        inner join {grid_outcome_schema}.{grid_outcome_table} grid on grid_portions.grid_id = grid.id
        inner join {census_rate_schema}.{census_rate_table} bg on grid_portions.census_id = bg.id

        group by bg.id, bg.blockgroup, tract, bg.county_name, bg.wkb_geometry;

        """.format(field_calculations=field_calculations, **sql_config_dict)

        execute_sql(create_blockgroup_results)
Пример #9
0
    def run_analysis(self, **kwargs):
        """
        Run the energy analysis for this config entity.

        Resolves the energy/base/climate-zone feature classes, dispatches to
        the future or base calculation path, writes the computed rows to the
        energy result table, and rebuilds its rel table so the imported rows
        are registered with the Django layer.
        """
        start_time = time.time()
        self.report_progress(0.1, **kwargs)

        self.climate_zone_class = self.config_entity.db_entity_feature_class(DbEntityKey.CLIMATE_ZONES)
        self.energy_class = self.config_entity.db_entity_feature_class(DbEntityKey.ENERGY)
        self.base_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
        self.rel_table = parse_schema_and_table(self.energy_class._meta.db_table)[1]
        # list(...) so indexing works with the dict views Python 3 returns as
        # well as the plain lists of Python 2.
        self.rel_column = list(self.energy_class._meta.parents.values())[0].column

        options = dict(
            energy_result_table=self.energy_class.db_entity_key,
            energy_schema=parse_schema_and_table(self.energy_class._meta.db_table)[0],
            base_table=self.base_class.db_entity_key,
            base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
        )
        logger.info("Running Energy Updater Tool with options %s" % options)

        # Future scenarios additionally need the end-state canvas.
        if isinstance(self.config_entity.subclassed, FutureScenario):
            self.end_state_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
            logger.info("Running Future Calculations")
            energy_output_list, options = self.run_future_calculations(options, **kwargs)
        else:
            logger.info("Running Base Calculations")
            energy_output_list, options = self.run_base_calculations(options, **kwargs)

        logger.info("Writing to feature table {energy_schema}.{energy_result_table}".format(**options))
        self.write_results_to_database(options, energy_output_list)

        logger.info("Writing to relative table {energy_schema}.{rel_table}".format(
            energy_schema=options['energy_schema'],
            rel_table=self.rel_table))

        # Rebuild the rel table: one row per result row, stamped with now().
        updated = datetime.datetime.now()
        truncate_table(options['energy_schema'] + '.' + self.rel_table)

        pSql = '''
        insert into {energy_schema}.{rel_table} ({rel_column}, updated)
        select id, '{updated}' from {energy_schema}.{energy_result_table};'''.format(
            energy_schema=options['energy_schema'],
            energy_result_table=options['energy_result_table'],
            rel_table=self.rel_table,
            rel_column=self.rel_column,
            updated=updated)

        execute_sql(pSql)

        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(self.config_entity, self.config_entity.computed_db_entities(key=DbEntityKey.ENERGY)[0])
        self.report_progress(0.2, **kwargs)

        logger.info('Finished: ' + str(time.time() - start_time))
Пример #10
0
    def run_analysis(self, **kwargs):
        """
        Run the energy analysis for this config entity: resolve the feature
        classes involved, run either the future or base calculation path,
        write the results to the energy feature table, and rebuild its rel
        table so the new rows are registered.
        """

        start_time = time.time()
        self.report_progress(0.1, **kwargs)

        self.climate_zone_class = self.config_entity.db_entity_feature_class(DbEntityKey.CLIMATE_ZONES)
        self.energy_class = self.config_entity.db_entity_feature_class(DbEntityKey.ENERGY)
        self.base_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
        self.rel_table = parse_schema_and_table(self.energy_class._meta.db_table)[1]
        # NOTE(review): `parents.values()[0]` indexes a dict's values — fine on
        # Python 2, but a view (not indexable) on Python 3; would need list().
        self.rel_column = self.energy_class._meta.parents.values()[0].column

        options = dict(
            energy_result_table=self.energy_class.db_entity_key,
            energy_schema=parse_schema_and_table(self.energy_class._meta.db_table)[0],
            base_table=self.base_class.db_entity_key,
            base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
        )
        logger.info("Running Energy Updater Tool with options %s" % options)

        # Future scenarios additionally need the end-state canvas.
        if isinstance(self.config_entity.subclassed, FutureScenario):
            self.end_state_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
            logger.info("Running Future Calculations")
            energy_output_list, options = self.run_future_calculations(options, **kwargs)
        else:
            logger.info("Running Base Calculations")
            energy_output_list, options = self.run_base_calculations(options, **kwargs)

        logger.info("Writing to feature table {energy_schema}.{energy_result_table}".format(**options))
        self.write_results_to_database(options, energy_output_list)


        logger.info("Writing to relative table {energy_schema}.{rel_table}".format(
            energy_schema=options['energy_schema'],
            rel_table=self.rel_table))

        # Rebuild the rel table: one row per result row, stamped with now().
        updated = datetime.datetime.now()
        truncate_table(options['energy_schema'] + '.' + self.rel_table)

        pSql = '''
        insert into {energy_schema}.{rel_table} ({rel_column}, updated)
        select id, '{updated}' from {energy_schema}.{energy_result_table};'''.format(
            energy_schema=options['energy_schema'],
            energy_result_table=options['energy_result_table'],
            rel_table=self.rel_table,
            rel_column=self.rel_column,
            updated=updated)

        execute_sql(pSql)

        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(self.config_entity, self.config_entity.computed_db_entities(key=DbEntityKey.ENERGY)[0])
        self.report_progress(0.2, **kwargs)

        logger.info('Finished: ' + str(time.time() - start_time))
Пример #11
0
def run_aggregate_within_variable_distance_processes(sql_config_dict):
    """
    Sum canvas dwelling units and employment within each trip-length feature's
    own (variable, attractions_hbw-derived) buffer distance onto the public
    health variables table, via a temporary projected canvas table.
    """
    ph_schema = sql_config_dict['public_health_variables_schema']
    variable_table = sql_config_dict['uf_canvas_table'] + '_variable'
    qualified_variable_table = '{public_health_variables_schema}.{uf_canvas_table}_variable'.format(**sql_config_dict)

    # Start from a clean slate in case of a previous partial run.
    drop_table(qualified_variable_table)

    # Build the temporary table: trip-length geometry reprojected to EPSG:3310
    # (meters), per-feature buffer distance in meters, and the canvas du/emp
    # apportioned by intersected area.
    create_sql = '''
    create table {public_health_variables_schema}.{uf_canvas_table}_variable
    as select
      a.id, st_transform(a.wkb_geometry, 3310) as wkb_geometry, cast(a.attractions_hbw * 1609.0 as float) as distance,
      sum(du * st_area(st_intersection(a.wkb_geometry, b.wkb_geometry)) / st_area(b.wkb_geometry)) as du_variable,
      sum(emp * st_area(st_intersection(a.wkb_geometry, b.wkb_geometry)) / st_area(b.wkb_geometry)) as emp_variable
    from
      (select id, wkb_geometry, attractions_hbw from {trip_lengths_schema}.{trip_lengths_table}) a,
      (select wkb_geometry, du, emp from {uf_canvas_schema}.{uf_canvas_table} where du + emp > 0) b
    where st_intersects(b.wkb_geometry, a.wkb_geometry) group by a.id, a.wkb_geometry, a.attractions_hbw;
    '''.format(public_health_variables_schema=ph_schema,
               uf_canvas_schema=sql_config_dict['uf_canvas_schema'],
               uf_canvas_table=sql_config_dict['uf_canvas_table'],
               trip_lengths_schema=sql_config_dict['trip_lengths_schema'],
               trip_lengths_table=sql_config_dict['trip_lengths_table'])

    execute_sql(create_sql)

    add_geom_idx(ph_schema, variable_table)
    add_primary_key(ph_schema, variable_table, 'id')

    add_analysis_geom(ph_schema, sql_config_dict['public_health_variables_table'])

    aggregate_within_variable_distance({
        'source_table': ph_schema + '.' + variable_table,
        'source_table_query': 'id is not null',
        'target_table_schema': ph_schema,
        'target_table': sql_config_dict['public_health_variables_table'],
        'target_table_query': 'pop > 0',
        'target_table_pk': 'id',
        'suffix': 'variable',
        'aggregation_type': 'sum',
        'variable_field_list': ['du_variable', 'emp_variable'],
    })

    # The temporary table is no longer needed once aggregation has run.
    drop_table(qualified_variable_table)
Пример #12
0
    def aggregate_results_to_block_group(self, sql_config_dict):
        """
        Aggregates the result table (at grid scale) to a Census Block Group
        result: demographic counts are summed per block group, all other
        outcome fields are averaged weighted by grid-cell population.
        """

        # Exclude the directly-summed demographic columns from the weighted set.
        attribute_list = filter(lambda x: x not in ['id', 'hh', 'pop', 'pop_adult', 'pop_adult_high', 'pop_adult_med',
                                                    'pop_adult_low', 'pop_senior', 'pop_teen', 'pop_children'],
                                self.outcome_fields)

        # One population-weighted average expression per remaining field.
        field_calculations = ''',
        '''.join([
            "case when SUM(grid.pop) > 0 then SUM(grid.{field} * grid.pop) / SUM(grid.pop) else 0 end as {field}".format(
                field=field)
            for field in attribute_list
        ])

        # Rebuild the block-group result table from scratch.
        drop_table('{block_group_outcome_schema}.{block_group_outcome_table}'.format(**sql_config_dict))

        create_blockgroup_results = """
        CREATE TABLE {block_group_outcome_schema}.{block_group_outcome_table} AS SELECT

          bg.id as id,
          bg.blockgroup as blockgroup,
          bg.tract as tract,
          bg.county_name as county_name,

          SUM(grid.pop) as pop,
          SUM(grid.hh) as hh,
          SUM(grid.pop_adult) as pop_adult,

          SUM(grid.pop_adult_high) as pop_adult_high,
          SUM(grid.pop_adult_med) as pop_adult_med,
          SUM(grid.pop_adult_low) as pop_adult_low,

          SUM(grid.pop_senior) as pop_senior,
          SUM(grid.pop_teen) as pop_teen,
          SUM(grid.pop_children) as pop_children,

          {field_calculations},

          bg.wkb_geometry as wkb_geometry

        FROM (select grid_id, census_id from {public_health_variables_schema}.{source_grid_table}_join group by grid_id, census_id) grid_portions
        inner join {grid_outcome_schema}.{grid_outcome_table} grid on grid_portions.grid_id = grid.id
        inner join {census_rate_schema}.{census_rate_table} bg on grid_portions.census_id = bg.id

        group by bg.id, bg.blockgroup, tract, bg.county_name, bg.wkb_geometry;

        """.format(field_calculations=field_calculations, **sql_config_dict)

        execute_sql(create_blockgroup_results)
Пример #13
0
    def run_water_calculations(self, **kwargs):
        """
        Run the water-demand analysis for this config entity.

        Resolves the water/base/climate-zone feature classes, dispatches to
        the future or base calculation path, writes the results to the water
        result table, and rebuilds its rel table so the imported rows are
        registered with the Django layer.
        """
        self.format_policy_inputs()

        self.water_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.WATER)
        self.base_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.BASE_CANVAS)
        self.climate_zone_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.CLIMATE_ZONES)
        self.rel_table = parse_schema_and_table(
            self.water_class._meta.db_table)[1]
        # list(...) so indexing works with the dict views Python 3 returns as
        # well as the plain lists of Python 2.
        self.rel_column = list(self.water_class._meta.parents.values())[0].column

        # Future scenarios additionally need the end-state canvas.
        if isinstance(self.config_entity.subclassed, FutureScenario):
            self.report_progress(0.2, **kwargs)
            self.end_state_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.END_STATE)

            water_output_list, options = self.run_future_water_calculations(
                **kwargs)
        else:
            self.report_progress(0.2, **kwargs)
            water_output_list, options = self.run_base_water_calculations()
            self.report_progress(0.7, **kwargs)

        self.write_water_results_to_database(options, water_output_list)

        # Rebuild the rel table: one row per result row, stamped with now().
        updated = datetime.datetime.now()
        truncate_table(options['water_schema'] + '.' + self.rel_table)

        pSql = '''
        insert into {water_schema}.{rel_table} ({rel_column}, updated) select id, '{updated}' from {water_schema}.{water_result_table};'''.format(
            water_schema=options['water_schema'],
            water_result_table=options['water_result_table'],
            rel_table=self.rel_table,
            rel_column=self.rel_column,
            updated=updated)

        execute_sql(pSql)

        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(
            self.config_entity,
            self.config_entity.computed_db_entities(key=DbEntityKey.WATER)[0])
        # NOTE(review): 0.10000001 (rather than 0.1) looks deliberate, perhaps
        # to push accumulated progress past floating-point rounding — confirm.
        self.report_progress(0.10000001, **kwargs)
Пример #14
0
def run_aggregate_within_variable_distance_processes(sql_config_dict):
    """
    Sum canvas dwelling units and employment within each trip-length feature's
    own (variable, attractions_hbw-derived) buffer distance onto the public
    health variables table, via a temporary projected canvas table.
    """

    # Start from a clean slate in case of a previous partial run.
    drop_table('{public_health_variables_schema}.{uf_canvas_table}_variable'.format(
        public_health_variables_schema=sql_config_dict['public_health_variables_schema'],
        uf_canvas_table=sql_config_dict['uf_canvas_table']))

    # Temporary table: trip-length geometry reprojected to EPSG:3310 (meters),
    # per-feature buffer distance in meters, and canvas du/emp apportioned by
    # intersected area.
    pSql = '''
    create table {public_health_variables_schema}.{uf_canvas_table}_variable
    as select
      a.id, st_transform(a.wkb_geometry, 3310) as wkb_geometry, cast(a.attractions_hbw * 1609.0 as float) as distance,
      sum(du * st_area(st_intersection(a.wkb_geometry, b.wkb_geometry)) / st_area(b.wkb_geometry)) as du_variable,
      sum(emp * st_area(st_intersection(a.wkb_geometry, b.wkb_geometry)) / st_area(b.wkb_geometry)) as emp_variable
    from
      (select id, wkb_geometry, attractions_hbw from {trip_lengths_schema}.{trip_lengths_table}) a,
      (select wkb_geometry, du, emp from {uf_canvas_schema}.{uf_canvas_table} where du + emp > 0) b
    where st_intersects(b.wkb_geometry, a.wkb_geometry) group by a.id, a.wkb_geometry, a.attractions_hbw;
    '''.format(public_health_variables_schema=sql_config_dict['public_health_variables_schema'],
               uf_canvas_schema=sql_config_dict['uf_canvas_schema'],
               uf_canvas_table=sql_config_dict['uf_canvas_table'],
               trip_lengths_schema=sql_config_dict['trip_lengths_schema'],
               trip_lengths_table=sql_config_dict['trip_lengths_table'])

    execute_sql(pSql)

    add_geom_idx(sql_config_dict['public_health_variables_schema'], sql_config_dict['uf_canvas_table'] + '_variable')
    add_primary_key(sql_config_dict['public_health_variables_schema'], sql_config_dict['uf_canvas_table'] + '_variable', 'id')

    add_analysis_geom(sql_config_dict['public_health_variables_schema'], sql_config_dict['public_health_variables_table'])

    aggregate_within_variable_distance(dict(
        source_table=sql_config_dict['public_health_variables_schema'] + '.' + sql_config_dict['uf_canvas_table'] + '_variable',
        source_table_query='id is not null',
        target_table_schema=sql_config_dict['public_health_variables_schema'],
        target_table=sql_config_dict['public_health_variables_table'],
        target_table_query='pop > 0',
        target_table_pk='id',
        suffix='variable',
        aggregation_type='sum',
        variable_field_list=['du_variable', 'emp_variable']
    ))

    # The temporary table is no longer needed once aggregation has run.
    drop_table('{public_health_variables_schema}.{uf_canvas_table}_variable'.format(
        public_health_variables_schema=sql_config_dict['public_health_variables_schema'],
        uf_canvas_table=sql_config_dict['uf_canvas_table']))
Пример #15
0
    def update_result_table_geometry_column(self, options):
        """
        Attach geometry from the uf canvas to the VMT result table, index it,
        and rebuild the VMT rel table so the new result rows are registered.
        """
        add_column_sql = '''alter table {vmt_schema}.{vmt_result_table} add column wkb_geometry geometry (GEOMETRY, 4326);
        '''.format(**options)
        execute_sql(add_column_sql)

        # Copy geometry from the canvas (matched on id) and spatially index it.
        copy_geometry_sql = '''
        update {vmt_schema}.{vmt_result_table} b set wkb_geometry = a.wkb_geometry
            from (select id, wkb_geometry from {uf_canvas_schema}.{uf_canvas_table}) a
            where cast(a.id as int) = cast(b.id as int);
        CREATE INDEX ON {vmt_schema}.{vmt_result_table}  USING gist (wkb_geometry);
        '''.format(**options)
        execute_sql(copy_geometry_sql)

        # Rebuild the rel table: one row per result row, stamped with now().
        updated = datetime.datetime.now()
        truncate_table(options['vmt_schema'] + '.' + options['vmt_rel_table'])

        rel_insert_sql = '''
        insert into {vmt_schema}.{rel_table} ({rel_column}, updated)
          select id, '{updated}' from {vmt_schema}.{vmt_result_table};'''.format(
            vmt_schema=options['vmt_schema'],
            vmt_result_table=options['vmt_result_table'],
            rel_table=options['vmt_rel_table'],
            rel_column=options['vmt_rel_column'],
            updated=updated)
        execute_sql(rel_insert_sql)

        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(
            options['config_entity'],
            options['config_entity'].computed_db_entities(key=DbEntityKey.VMT)[0])
Пример #16
0
    def update_result_table_geometry_column(self, options):
        """
        Attach geometry from the uf canvas to the VMT result table, index it,
        and rebuild the VMT rel table so the new result rows are registered.
        """
        pSql = '''alter table {vmt_schema}.{vmt_result_table} add column wkb_geometry geometry (GEOMETRY, 4326);
        '''.format(**options)
        execute_sql(pSql)

        # Copy geometry from the canvas (matched on id) and spatially index it.
        pSql = '''
        update {vmt_schema}.{vmt_result_table} b set wkb_geometry = a.wkb_geometry
            from (select id, wkb_geometry from {uf_canvas_schema}.{uf_canvas_table}) a
            where cast(a.id as int) = cast(b.id as int);
        CREATE INDEX ON {vmt_schema}.{vmt_result_table}  USING gist (wkb_geometry);
        '''.format(**options)

        execute_sql(pSql)

        # Rebuild the rel table: one row per result row, stamped with now().
        updated = datetime.datetime.now()
        truncate_table(options['vmt_schema'] + '.' + options['vmt_rel_table'])

        pSql = '''
        insert into {vmt_schema}.{rel_table} ({rel_column}, updated)
          select id, '{updated}' from {vmt_schema}.{vmt_result_table};'''.format(
            vmt_schema=options['vmt_schema'],
            vmt_result_table=options['vmt_result_table'],
            rel_table=options['vmt_rel_table'],
            rel_column=options['vmt_rel_column'],
            updated=updated)

        execute_sql(pSql)

        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(options['config_entity'], options['config_entity'].computed_db_entities(key=DbEntityKey.VMT)[0])
    def write_results_to_database(self, options, public_health_output_list):
        """
        Persist the grid-scale public health outcomes: recreate the outcome
        table, bulk-load the rows through a tab-delimited text buffer, attach
        the source grid geometry, and rebuild the rel table.
        """
        drop_table(
            '{grid_outcome_schema}.{grid_outcome_table}'.format(**options))

        # One numeric(20,8) column per outcome field; id is the integer key.
        attribute_list = [field for field in self.outcome_fields if field != 'id']
        options['output_field_syntax'] = 'id int, ' + \
                                         create_sql_calculations(attribute_list, '{0} numeric(20,8)')

        execute_sql(
            "create table {grid_outcome_schema}.{grid_outcome_table} ({output_field_syntax});"
            .format(**options))

        # Serialize rows as tab-delimited text: ints verbatim, everything else
        # rounded to 8 decimal places to match the numeric(20,8) columns.
        output_textfile = StringIO("")
        for row in public_health_output_list:
            stringrow = [str(item) if isinstance(item, int) else str(round(item, 8))
                         for item in row]
            output_textfile.write("\t".join(stringrow) + "\n")

        output_textfile.seek(os.SEEK_SET)
        # copy text file output back into Postgres
        copy_from_text_to_db(
            output_textfile,
            '{grid_outcome_schema}.{grid_outcome_table}'.format(**options))
        output_textfile.close()

        # Attach the source grid geometry to the freshly loaded outcome rows.
        pSql = '''alter table {grid_outcome_schema}.{grid_outcome_table}
                    add column wkb_geometry geometry (GEOMETRY, 4326);'''.format(
            **options)
        execute_sql(pSql)

        pSql = '''update {grid_outcome_schema}.{grid_outcome_table} b set
                    wkb_geometry = st_setSRID(a.wkb_geometry, 4326)
                    from (select id, wkb_geometry from {source_grid_schema}.{source_grid_table}) a
                    where cast(a.id as int) = cast(b.id as int);
        '''.format(**options)
        execute_sql(pSql)

        add_geom_idx(options['grid_outcome_schema'],
                     options['grid_outcome_table'], 'wkb_geometry')
        add_primary_key(options['grid_outcome_schema'],
                        options['grid_outcome_table'], 'id')

        # Since not every grid cell results in a grid_outcome, we need to wipe
        # out the rel table and recreate it to match the grid_outcome table.
        # Otherwise there will be too many rel table rows and cloning the
        # DbEntity or ConfigEntity will fail.
        logger.info(
            "Writing to relative table {grid_outcome_schema}.{grid_outcome_table}rel"
            .format(**options))
        truncate_table(
            "{grid_outcome_schema}.{grid_outcome_table}rel".format(**options))
        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(
            self.config_entity,
            self.config_entity.computed_db_entities(
                key=DbEntityKey.PH_GRID_OUTCOMES)[0])
Example #18
0
    def run_water_calculations(self, **kwargs):
        """Run the water demand analysis for the active config entity.

        Chooses the future- or base-scenario calculation path depending on the
        scenario type, writes the result table, then rebuilds the rel table
        rows that join results back to the water feature class.
        """
        self.format_policy_inputs()

        # Resolve the feature classes and rel-table metadata used throughout
        self.water_class = self.config_entity.db_entity_feature_class(DbEntityKey.WATER)
        self.base_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
        self.climate_zone_class = self.config_entity.db_entity_feature_class(DbEntityKey.CLIMATE_ZONES)
        self.rel_table = parse_schema_and_table(self.water_class._meta.db_table)[1]
        self.rel_column = self.water_class._meta.parents.values()[0].column

        if isinstance(self.config_entity.subclassed, FutureScenario):
            # Future scenarios additionally need the end-state feature class
            self.report_progress(0.2, **kwargs)
            self.end_state_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
            water_output_list, options = self.run_future_water_calculations(**kwargs)
        else:
            self.report_progress(0.2, **kwargs)
            water_output_list, options = self.run_base_water_calculations()
            self.report_progress(0.7, **kwargs)

        self.write_water_results_to_database(options, water_output_list)

        # Repopulate the rel table with one row per result id, stamped with
        # the current time
        timestamp = datetime.datetime.now()
        truncate_table(options['water_schema'] + '.' + self.rel_table)

        insert_sql = '''
        insert into {water_schema}.{rel_table} ({rel_column}, updated) select id, '{updated}' from {water_schema}.{water_result_table};'''.format(
            water_schema=options['water_schema'],
            water_result_table=options['water_result_table'],
            rel_table=self.rel_table,
            rel_column=self.rel_column,
            updated=timestamp)

        execute_sql(insert_sql)

        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(self.config_entity, self.config_entity.computed_db_entities(key=DbEntityKey.WATER)[0])
        self.report_progress(0.10000001, **kwargs)
Example #19
0
    def write_results_to_database(self, options, energy_output_list):
        """Persist energy analysis rows into a fresh result table.

        Rebuilds {energy_schema}.{energy_result_table}, bulk-loads the rows
        through a tab-delimited buffer, copies the base-canvas geometry onto
        the results, and indexes the table for downstream use.
        """
        drop_table('{energy_schema}.{energy_result_table}'.format(**options))

        # All output fields except the id and climate-zone ids become
        # numeric(14,4) columns
        numeric_fields = [name for name in self.output_fields
                          if name not in ('id', 'title24_zone', 'fcz_zone')]

        field_syntax = 'id int, title24_zone int, fcz_zone int, ' + create_sql_calculations(
            numeric_fields, '{0} numeric(14, 4)')

        execute_sql('''
        create table {energy_schema}.{energy_result_table} ({output_field_syntax});'''.format(
            output_field_syntax=field_syntax, **options))

        # Serialize rows as tab-delimited text for a fast COPY into Postgres;
        # ints are written verbatim, everything else rounded to 4 places
        text_buffer = StringIO("")
        for row in energy_output_list:
            cells = [str(value) if isinstance(value, int) else str(round(value, 4))
                     for value in row]
            text_buffer.write("\t".join(cells) + "\n")

        text_buffer.seek(os.SEEK_SET)
        copy_from_text_to_db(
            text_buffer,
            '{energy_schema}.{energy_result_table}'.format(**options))
        text_buffer.close()

        # Attach a 4326 geometry column and populate it from the base canvas
        execute_sql('''alter table {energy_schema}.{energy_result_table} add column wkb_geometry geometry (GEOMETRY, 4326);'''.format(
            **options))

        execute_sql('''update {energy_schema}.{energy_result_table} b set
                    wkb_geometry = st_setSRID(a.wkb_geometry, 4326)
                    from (select id, wkb_geometry from {base_schema}.{base_table}) a
                    where cast(a.id as int) = cast(b.id as int);
        '''.format(**options))

        add_geom_idx(options['energy_schema'], options['energy_result_table'],
                     'wkb_geometry')
        add_primary_key(options['energy_schema'],
                        options['energy_result_table'], 'id')
        add_attribute_idx(options['energy_schema'],
                          options['energy_result_table'],
                          'annual_million_btus_per_unit')
Example #20
0
    def write_results_to_database(self, options, public_health_output_list):
        """Write public health grid outcomes to a newly created result table.

        Drops and recreates the outcome table, COPYs the rows in via a
        tab-separated text buffer, joins the source grid geometry onto the
        results, indexes them, and regenerates the rel table.
        """
        drop_table('{grid_outcome_schema}.{grid_outcome_table}'.format(**options))

        # id is an int column; every other outcome field is numeric(20,8)
        value_fields = [name for name in self.outcome_fields if name != 'id']
        options['output_field_syntax'] = 'id int, ' + create_sql_calculations(value_fields, '{0} numeric(20,8)')

        execute_sql("create table {grid_outcome_schema}.{grid_outcome_table} ({output_field_syntax});".format(
            **options))

        # Build a tab-delimited buffer and COPY it into the new table
        rows_buffer = StringIO("")
        for record in public_health_output_list:
            formatted = []
            for value in record:
                formatted.append(str(value) if isinstance(value, int) else str(round(value, 8)))
            rows_buffer.write("\t".join(formatted) + "\n")

        rows_buffer.seek(os.SEEK_SET)
        copy_from_text_to_db(rows_buffer, '{grid_outcome_schema}.{grid_outcome_table}'.format(**options))
        rows_buffer.close()

        # Add the 4326 geometry column and fill it from the source grid
        execute_sql('''alter table {grid_outcome_schema}.{grid_outcome_table}
                    add column wkb_geometry geometry (GEOMETRY, 4326);'''.format(**options))

        execute_sql('''update {grid_outcome_schema}.{grid_outcome_table} b set
                    wkb_geometry = st_setSRID(a.wkb_geometry, 4326)
                    from (select id, wkb_geometry from {source_grid_schema}.{source_grid_table}) a
                    where cast(a.id as int) = cast(b.id as int);
        '''.format(**options))

        add_geom_idx(options['grid_outcome_schema'], options['grid_outcome_table'], 'wkb_geometry')
        add_primary_key(options['grid_outcome_schema'], options['grid_outcome_table'], 'id')

        # Not every grid cell yields an outcome row, so the rel table must be
        # rebuilt to match the outcome table exactly; stale extra rel rows
        # break cloning of the DbEntity or ConfigEntity.
        logger.info("Writing to relative table {grid_outcome_schema}.{grid_outcome_table}rel".format(**options))
        truncate_table("{grid_outcome_schema}.{grid_outcome_table}rel".format(**options))
        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(
            self.config_entity,
            self.config_entity.computed_db_entities(key=DbEntityKey.PH_GRID_OUTCOMES)[0])
Example #21
0
    def write_results_to_database(self, options, energy_output_list):
        """Write energy analysis results to a freshly rebuilt result table,
        attach base-canvas geometry, and index the table."""
        drop_table('{energy_schema}.{energy_result_table}'.format(**options))

        # Non-id, non-climate-zone fields become numeric(14,4) columns
        measure_fields = [name for name in self.output_fields
                          if name not in ('id', 'title24_zone', 'fcz_zone')]

        table_columns = 'id int, title24_zone int, fcz_zone int, ' + create_sql_calculations(measure_fields, '{0} numeric(14, 4)')

        execute_sql('''
        create table {energy_schema}.{energy_result_table} ({output_field_syntax});'''.format(output_field_syntax=table_columns, **options))

        # COPY the rows in via a tab-delimited buffer; ints verbatim,
        # everything else rounded to 4 places
        rows_buffer = StringIO("")
        for record in energy_output_list:
            formatted = [str(value) if isinstance(value, int) else str(round(value, 4))
                         for value in record]
            rows_buffer.write("\t".join(formatted) + "\n")

        rows_buffer.seek(os.SEEK_SET)
        copy_from_text_to_db(rows_buffer, '{energy_schema}.{energy_result_table}'.format(**options))
        rows_buffer.close()

        # Geometry comes from the base canvas, tagged as SRID 4326
        execute_sql('''alter table {energy_schema}.{energy_result_table} add column wkb_geometry geometry (GEOMETRY, 4326);'''.format(**options))

        execute_sql('''update {energy_schema}.{energy_result_table} b set
                    wkb_geometry = st_setSRID(a.wkb_geometry, 4326)
                    from (select id, wkb_geometry from {base_schema}.{base_table}) a
                    where cast(a.id as int) = cast(b.id as int);
        '''.format(**options))

        add_geom_idx(options['energy_schema'], options['energy_result_table'], 'wkb_geometry')
        add_primary_key(options['energy_schema'], options['energy_result_table'], 'id')
        add_attribute_idx(options['energy_schema'], options['energy_result_table'], 'annual_million_btus_per_unit')
Example #22
0
def add_analysis_geom(schema, table, srid=3310):
    """Add and populate an analysis_geom column on {schema}.{table}.

    Best-effort drops any existing analysis geometry index/column, adds the
    column, fills it with the geometry reprojected to `srid` (default 3310,
    California Albers -- parameterized so non-CA regions can pass their own
    analysis projection) and creates a GiST index for spatial queries.
    """
    # Best-effort cleanup of a previous run; fails harmlessly the first time
    pSql = '''
    DROP INDEX {schema}.{schema}_{table}_analysis_geom;
    Alter Table {schema}.{table} drop column analysis_geom;'''.format(schema=schema, table=table)
    try:
        execute_sql(pSql)
    except Exception:
        # Fixed message: the drop failed because there was nothing to remove
        print("analysis column did not exist... continuing")

    pSql = '''alter table {schema}.{table} add column analysis_geom geometry;'''.format(schema=schema, table=table)
    try:
        execute_sql(pSql)
    except Exception:
        print("analysis column already exists... continuing")

    # Populate the analysis geometry and index it for spatial lookups
    pSql = '''update {schema}.{table} set analysis_geom = st_setSRID(st_transform(wkb_geometry, {srid}), {srid});
    create index {schema}_{table}_analysis_geom on {schema}.{table} using gist (analysis_geom);
    '''.format(schema=schema, table=table, srid=srid)

    execute_sql(pSql)
Example #23
0
def add_analysis_geom(schema, table, srid=3310):
    """Add and populate an analysis_geom column on {schema}.{table}.

    Best-effort drops any existing analysis geometry index/column, adds the
    column, fills it with the geometry reprojected to `srid` (default 3310,
    California Albers -- parameterized so non-CA regions can pass their own
    analysis projection) and creates a GiST index for spatial queries.
    """
    # Best-effort cleanup of a previous run; fails harmlessly the first time
    pSql = '''
    DROP INDEX {schema}.{schema}_{table}_analysis_geom;
    Alter Table {schema}.{table} drop column analysis_geom;'''.format(schema=schema, table=table)
    try:
        execute_sql(pSql)
    except Exception:
        # Fixed message: the drop failed because there was nothing to remove
        print("analysis column did not exist... continuing")

    pSql = '''alter table {schema}.{table} add column analysis_geom geometry;'''.format(schema=schema, table=table)
    try:
        execute_sql(pSql)
    except Exception:
        print("analysis column already exists... continuing")

    # Populate the analysis geometry and index it for spatial lookups
    pSql = '''update {schema}.{table} set analysis_geom = st_setSRID(st_transform(wkb_geometry, {srid}), {srid});
    create index {schema}_{table}_analysis_geom on {schema}.{table} using gist (analysis_geom);
    '''.format(schema=schema, table=table, srid=srid)

    execute_sql(pSql)
def aggregate_within_distance(distance_options):
    """Aggregate source-table attributes within a distance of each target row.

    Creates a SQL helper function (aggregate_within_distance_tool) that, given
    a target row's id and geometry, computes {aggregation_type} of each source
    column listed in distance_options['variable_field_dict'] over the source
    rows within {distance} of the geometry. The function is applied to the
    target table in contiguous id ranges by a pool of worker threads; the
    per-id results land in a scratch table {target_table}_{suffix}, which is
    then folded back into the target table's variable fields and dropped.

    NOTE(review): assumes distance_options contains keys such as
    'source_table', 'source_table_query', 'distance', 'suffix',
    'aggregation_type', 'variable_field_dict', 'target_table*' etc. --
    confirm against callers.
    """
    # One worker thread per core, all fed from a shared work queue
    thread_count = count_cores()
    queue = queue_process()

    # Union of every source column referenced by any output variable
    source_table_column_list = []

    for key, value in distance_options['variable_field_dict'].items():
        source_table_column_list += value

    # De-duplicate columns shared by several output variables
    source_table_column_list = list(set(source_table_column_list))

    # Yields 'out {0} float' -> one OUT parameter per aggregated column
    sql_format = 'out {formatter} float'.format(formatter="{0}")
    output_field_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    # Yields e.g. 'cast(SUM({0}) as float) as {0}_<suffix>' per column
    sql_format = 'cast({aggregation_type}({formatter}) as float) as {formatter}_{suffix}'.format(formatter="{0}", **distance_options)
    sql_calculations_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    # Recreate the helper function from scratch (its signature depends on the
    # column list, so a stale definition must be dropped first)
    pSql = '''drop function if exists aggregate_within_distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      {output_field_format}) cascade;'''.format(
        output_field_format=output_field_format)

    execute_sql(pSql)

    # The function aggregates source rows within {distance} of the passed
    # geometry that also satisfy source_table_query
    pSql = '''
    CREATE OR REPLACE FUNCTION aggregate_within_distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      {output_field_format})
    AS
    $$
      select
        $1 as id,
        {sql_calculations_format}

    from (select *, {source_geometry_column} as geometry from {source_table}) ref
        where ST_DWITHIN( $2, ref.geometry, {distance}) and (ref.{source_table_query});
    $$
    COST 10000
    language SQL STABLE strict;
    '''.format(source_table=distance_options['source_table'],
               source_table_query=distance_options['source_table_query'],
               distance=distance_options['distance'],
               source_geometry_column=distance_options['source_geometry_column'],
               output_field_format=output_field_format,
               sql_calculations_format=sql_calculations_format)

    execute_sql(pSql)

    # Scratch results table: one row per target id with the aggregated values
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'], suffix=distance_options['suffix']))

    sql_format = '{formatter}_{suffix} float'.format(formatter="{0}", **distance_options)
    output_table_field_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    pSql = '''create table {target_table_schema}.{target_table}_{suffix} (id int, {output_table_field_format});'''.format(
        target_table_schema=distance_options['target_table_schema'], target_table=distance_options['target_table'],
        suffix=distance_options['suffix'], output_table_field_format=output_table_field_format)

    execute_sql(pSql)

    # Ordered id list drives the partitioning of work into contiguous ranges
    pSql = 'select cast({target_table_pk} as int) from {target_table_schema}.{target_table} where {target_table_query} order by {target_table_pk}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_pk=distance_options['target_table_pk'],
        target_table_query=distance_options['target_table_query'])

    id_list = flatten(report_sql_values(pSql, 'fetchall'))

    # {start_id}/{end_id} are deliberately left unformatted here; each worker
    # substitutes them per queued id range
    insert_sql = '''
    insert into {target_table_schema}.{target_table}_{suffix}
      select (f).* from (
          select aggregate_within_distance_tool({target_table_pk}, {target_geometry_column}) as f
          from {target_table_schema}.{target_table}
          where
          {target_table_pk} >= {bottom_range_id} and
          {target_table_pk} <= {top_range_id} and
          {target_table_query}
          offset 0) s
    where (f).id is not null;
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_query=distance_options['target_table_query'],
        target_geometry_column=distance_options['target_geometry_column'],
        source_table=distance_options['source_table'],
        suffix=distance_options['suffix'],
        target_table_pk=distance_options['target_table_pk'],
        bottom_range_id="{start_id}",
        top_range_id="{end_id}")

    # Spin up the worker pool; each worker runs insert_sql for every
    # {start_id, end_id} dict it pulls off the queue
    for i in range(thread_count):
        t = MultithreadProcess(queue, insert_sql)
        t.setDaemon(True)
        t.start()

    #populate queue with data
    # NOTE(review): Python 2 integer division -- remainder rows are handled by
    # giving the last bucket everything up to the final id
    rows_per_thread = len(id_list) / thread_count
    offset = 0

    for i in range(thread_count):
        if i == thread_count - 1:
            ## last bucket gets any remainder, too
            last_thread = len(id_list) - 1
        else:
            last_thread = offset + rows_per_thread - 1

        rows_to_process = {
            'start_id': id_list[offset],
            'end_id': id_list[last_thread]
        }
        offset += rows_per_thread
        queue.put(rows_to_process)

    #wait on the queue until everything has been processed
    queue.join()

    add_attribute_idx(distance_options['target_table_schema'],
                      '{target_table}_{suffix}'.format(target_table=distance_options['target_table'],
                                                       suffix=distance_options['suffix']), 'id')

    # Fold the per-column aggregates back into each output variable on the
    # target table; a null aggregate counts as 0
    count = 1
    update_sql_format = ''
    if len(distance_options['variable_field_dict']) > 0:
        for key, value in distance_options['variable_field_dict'].items():
            update_table_field_format = create_sql_calculations(value, '{formatter}_{suffix}'.format(formatter='b.{0}',
                                                                                                     **distance_options), ' + ')
            if count == 1:
                update_sql_format += key + ' = ' + "(case when {0} is null then 0 else {0} end)".format(update_table_field_format)
            else:
                update_sql_format += ', ' + key + ' = ' + "(case when {0} is null then 0 else {0} end)".format(update_table_field_format)
            count +=1


        pSql = '''
        update {target_table_schema}.{target_table} a set {update_sql_format}
            from (select * from {target_table_schema}.{target_table}_{suffix}) b
            where a.{target_table_pk} = b.id and {target_table_query}
        '''.format(
            target_table_schema=distance_options['target_table_schema'],
            target_table=distance_options['target_table'],
            target_table_query=distance_options['target_table_query'],
            target_table_pk=distance_options['target_table_pk'],
            update_sql_format=update_sql_format,
            suffix=distance_options['suffix']
        )
        execute_sql(pSql)

    # Done with the scratch table
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix']))
    def update(self, **kwargs):
        """
            This function handles the update or creation on the environmental constraints geography producing the area
            for each layer with the environmental constraint behavior. This function will both add and remove
            constraints and produce the final constraints layer in the primary geography of the active scenario
        """
        # TODO : remove hard-coded 3310 (only works in CA), need to set an "analysis projection" in the Region
        start_time = time.time()

        # All DbEntities currently flagged with the environmental_constraint behavior
        current_db_entities = \
            set(self.config_entity.db_entities_having_behavior_key(BehaviorKey.Fab.ricate('environmental_constraint')))

        base_feature_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.BASE_CANVAS)

        options = dict(project_schema=parse_schema_and_table(
            base_feature_class._meta.db_table)[0],
                       base_table=base_feature_class.db_entity_key)

        logger.info('Inserting raw geographies into the environmental constraint geographies table for DbEntities: %s' % \
                    ', '.join(map(lambda db_entity: db_entity.name, current_db_entities)))

        # The staging table is rebuilt from scratch on every run
        drop_table(
            '{project_schema}.environmental_constraint_geographies_table'.
            format(project_schema=options['project_schema']))

        current_environmental_constraints = []
        for db_entity in current_db_entities:
            constraint_class = self.config_entity.db_entity_feature_class(
                db_entity.key)
            current_environmental_constraints.append(
                constraint_class.db_entity_key)

        # One <constraint_key>_id column per active constraint layer
        create_id_field_format = create_sql_calculations(
            current_environmental_constraints, '{0}_id int')
        insert_id_field_format = create_sql_calculations(
            current_environmental_constraints, '{0}_id')

        pSql = '''
        create table {project_schema}.environmental_constraint_geographies_table
            (primary_id integer, wkb_geometry geometry, {create_id_field_format});
        SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table', 'wkb_geometry', 3310)

        '''.format(project_schema=options['project_schema'],
                   create_id_field_format=create_id_field_format)

        execute_sql(pSql)

        # Intersect each constraint layer with the base canvas and store the
        # resulting pieces, dumped to single polygons and reprojected to 3310
        for db_entity in current_db_entities:
            logger.info(
                'Inserting into environmental constraint geographies table for DbEntity: %s'
                % db_entity.full_name)

            constraint_class = self.config_entity.db_entity_feature_class(
                db_entity.key)

            pSql = '''
                insert into {project_schema}.environmental_constraint_geographies_table (primary_id, wkb_geometry, {constraint_db_entity_key}_id) select
                    cast(primary_id as int), wkb_geometry, {constraint_db_entity_key}_id from (
                    select
                        id as primary_id,
                        {constraint_db_entity_id} as {constraint_db_entity_key}_id,
                        st_setSRID(st_transform(st_buffer((st_dump(wkb_geometry)).geom, 0), 3310), 3310) as wkb_geometry

                    from (
                        select b.id, st_intersection(a.wkb_geometry, b.wkb_geometry) as wkb_geometry
	                    from {constraint_schema}.{constraint_db_entity_key} a,
                        {project_schema}.{base_table} b
                            where st_intersects(a.wkb_geometry, b.wkb_geometry)) as intersection
                    ) as polygons;
                '''.format(
                project_schema=options['project_schema'],
                base_table=options['base_table'],
                constraint_schema=parse_schema_and_table(
                    constraint_class._meta.db_table)[0],
                constraint_db_entity_key=constraint_class.db_entity_key,
                constraint_db_entity_id=db_entity.id)

            execute_sql(pSql)

            logger.info(
                'finished inserting db_entity: {db_entity} {time} elapsed'.
                format(time=time.time() - start_time,
                       db_entity=constraint_class.db_entity_key))

        #only regenerate the merged environmental constraint whenever an envrionmental constraint is added or removed
        # from the layer

        add_geom_idx(options['project_schema'],
                     'environmental_constraint_geographies_table')

        logger.info('Unioning all environmental constraint geographies')
        drop_table(
            '{project_schema}.environmental_constraint_geographies_table_unioned'
            .format(project_schema=options['project_schema']))

        pSql = '''
            CREATE TABLE {project_schema}.environmental_constraint_geographies_table_unioned
                (id serial, wkb_geometry geometry, acres float, primary_id int, {create_id_field_format});
            SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table_unioned', 'wkb_geometry', 3310);
        '''.format(project_schema=options['project_schema'],
                   create_id_field_format=create_id_field_format)

        execute_sql(pSql)

        # Node and re-polygonize the overlapping constraint pieces per base
        # geography (exterior rings -> collect -> polygonize -> dump), compute
        # acreage (sq meters in EPSG:3310 * 0.000247105), and discard slivers
        # under 5 square meters
        pSql = '''
        insert into {project_schema}.environmental_constraint_geographies_table_unioned (wkb_geometry, acres, primary_id, {insert_id_field_format})
               SELECT
                    st_buffer(wkb_geometry, 0) as wkb_geometry,
                    st_area(st_buffer(wkb_geometry, 0)) * 0.000247105 as acres,
                    primary_id, {insert_id_field_format}

                    FROM (
                        SELECT
                            (ST_Dump(wkb_geometry)).geom as wkb_geometry,
                            primary_id, {insert_id_field_format}

                        FROM (
                            SELECT ST_Polygonize(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}   FROM (
                                SELECT ST_Collect(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}   FROM (
                                    SELECT ST_ExteriorRing(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}
                                        FROM {project_schema}.environmental_constraint_geographies_table) AS lines
                                            group by primary_id, {insert_id_field_format}) AS noded_lines
                                                group by primary_id, {insert_id_field_format}) as polygons
                    ) as final
                WHERE st_area(st_buffer(wkb_geometry, 0)) > 5;'''.format(
            project_schema=options['project_schema'],
            insert_id_field_format=insert_id_field_format)

        execute_sql(pSql)

        logger.info('finished unioning env constraints: {time} elapsed'.format(
            time=time.time() - start_time))

        #reproject table back to 4326 for integration with web viewing
        pSql = '''
        SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table_unioned', 'wkb_geometry', 4326);
        update {project_schema}.environmental_constraint_geographies_table_unioned a set wkb_geometry = st_transform(st_buffer(wkb_geometry, 0), 4326);
        '''.format(project_schema=options['project_schema'])
        execute_sql(pSql)

        add_geom_idx(options['project_schema'],
                     'environmental_constraint_geographies_table_unioned')

        logger.info('Env Union Finished: %s' % str(time.time() - start_time))
    def aggregate_results_to_outcomes_summary_table(self, sql_config_dict):
        """
        Aggregates the result table (at grid scale) to a standardized summary table
        """
        # Population/household fields are summed directly; everything else is
        # a pop-weighted average
        population_fields = [
            'id', 'hh', 'pop', 'pop_adult', 'pop_adult_high',
            'pop_adult_med', 'pop_adult_low', 'pop_senior', 'pop_teen',
            'pop_children'
        ]
        attribute_list = [name for name in self.outcome_fields
                          if name not in population_fields]
        output_list = [name for name in self.outcome_fields if name not in ['id']]

        field_calculations = ",\n        ".join(
            "case when SUM(pop) > 0 then SUM({field} * pop) / SUM(pop) else 0 end as {field}"
            .format(field=name) for name in attribute_list)

        truncate_table(
            '{outcome_summary_schema}.{outcome_summary_table}'.format(
                **sql_config_dict))

        summary_sql = """
        SELECT
          SUM(hh) as hh,
          SUM(pop) as pop,
          SUM(pop_adult) as pop_adult,
          SUM(pop_adult_high) as pop_adult_high,
          SUM(pop_adult_med) as pop_adult_med,
          SUM(pop_adult_low) as pop_adult_low,
          SUM(pop_senior) as pop_senior,
          SUM(pop_teen) as pop_teen,
          SUM(pop_children) as pop_children,

          {field_calculations}

        FROM {grid_outcome_schema}.{grid_outcome_table};
        """.format(field_calculations=field_calculations, **sql_config_dict)

        summary_results = flatten(report_sql_values(summary_sql, 'fetchall'))

        # Write one summary row per output field, tagging each with the model
        # source when a matching UD4H model is found
        for index_id, result in enumerate(output_list):
            source = 'ref'
            model_name = result
            ph_models = self.list_UD4H_models()

            # Minute-based outputs are named *_min in the model list
            if '_minutes' in model_name:
                model_name = model_name[:-8] + '_min'

            for model in ph_models:
                candidate = "".join(model[0]['name'].split())[:-4].lower()
                if candidate == model_name:
                    source = model[0]['source']

            insert_sql = '''
            insert into {outcome_summary_schema}.{outcome_summary_table} values ({index_id}, '{column_name}', '{source}', {model_output});
            '''.format(index_id=index_id,
                       column_name=str(result),
                       source=str(source),
                       model_output=summary_results[index_id],
                       **sql_config_dict)

            execute_sql(insert_sql)
    def update(self, **kwargs):
        """Compute developable acreage for the active scenario's end state.

        Adds a per-config-entity constraint_acres_<id> column to the unioned
        environmental constraint geography table, fills it per constraint
        layer (respecting priority/percent precedence), subtracts the summed
        constraint acres from gross acres on the end state table, and
        refreshes developable_proportion.
        """
        logger.debug("Executing Environmental Constraints using {0}".format(
            self.config_entity))
        config_entity = self.config_entity
        end_state_feature_class = config_entity.db_entity_feature_class(
            DbEntityKey.END_STATE)
        base_table = config_entity.db_entity_feature_class(
            DbEntityKey.BASE_CANVAS)

        options = dict(project_schema=parse_schema_and_table(
            base_table._meta.db_table)[0],
                       scenario_schema=parse_schema_and_table(
                           end_state_feature_class._meta.db_table)[0],
                       end_state_table=end_state_feature_class.db_entity_key)

        current_db_entities, db_entities_to_add, db_entities_to_delete = \
            self.update_or_create_environmental_constraint_percents(config_entity)

        # Snapshot each active constraint's key, priority and percent
        current_environmental_constraints = []
        logger.debug("Current db_entities {0}".format(current_db_entities))
        for db_entity in current_db_entities:
            logger.debug("Active db_entity {0}".format(db_entity.key))
            constraint_class = config_entity.db_entity_feature_class(
                db_entity.key)
            environmental_constraint_percent = EnvironmentalConstraintPercent.objects.filter(
                db_entity_id=db_entity.id, analysis_tool_id=self.id)[0]
            current_environmental_constraints.append(
                dict(key=constraint_class.db_entity_key,
                     priority=environmental_constraint_percent.priority,
                     percent=environmental_constraint_percent.percent))

        # Add the per-scenario acreage column if it isn't there yet; the
        # nested BEGIN swallows duplicate_column on reruns
        pSql = '''
        DO $$
            BEGIN
                BEGIN
                    ALTER TABLE {project_schema}.environmental_constraint_geographies_table_unioned ADD COLUMN constraint_acres_{config_entity_id} float;
                EXCEPTION
                    WHEN duplicate_column
                        THEN -- do nothing;
                END;
            END;
        $$'''.format(project_schema=options['project_schema'],
                     config_entity_id=self.config_entity.id)
        execute_sql(pSql)

        logger.info(
            'Calculate constraint acreage for the active scenario end state feature'
        )
        for db_entity in current_db_entities:
            constraint_class = self.config_entity.db_entity_feature_class(
                db_entity.key)
            environmental_constraint_percent = EnvironmentalConstraintPercent.objects.filter(
                db_entity_id=db_entity.id, analysis_tool_id=self.id)[0]
            constraint_percent = environmental_constraint_percent.percent
            active_constraint = filter(
                lambda dct: constraint_class.db_entity_key in dct['key'],
                current_environmental_constraints)[0]
            # Constraints that take precedence over this one: lower priority
            # number, or the same priority with a larger percent
            priority_constraints = filter(
                lambda dct: dct['priority'] < active_constraint['priority'] or
                (dct['priority'] == active_constraint['priority'] and dct[
                    'percent'] > active_constraint['percent']),
                current_environmental_constraints)

            priority_key_list = []
            for constraint in priority_constraints:
                priority_key_list.append(constraint['key'])

            # Only count acreage where no higher-precedence constraint applies
            priority_query = create_sql_calculations(
                priority_key_list, ' and {0}_id is null',
                ' and a.primary_id is not null')

            pSql = '''
            update {project_schema}.environmental_constraint_geographies_table_unioned a set
                constraint_acres_{config_entity_id} = acres * {percent} where {constraint}_id = {constraint_id} {priority_query};
            '''.format(project_schema=options['project_schema'],
                       constraint=constraint_class.db_entity_key,
                       constraint_id=db_entity.id,
                       percent=constraint_percent,
                       priority_query=priority_query,
                       config_entity_id=self.config_entity.id)

            execute_sql(pSql)

        # Developable acres = gross acres minus the summed constraint acres
        # per base geography
        pSql = '''
        update {scenario_schema}.{end_state_table} a set
            acres_developable = a.acres_gross - b.constraint_acres
            FROM
            (select primary_id,
                    sum(constraint_acres_{config_entity_id}) as constraint_acres
                from {project_schema}.environmental_constraint_geographies_table_unioned
                    where constraint_acres_{config_entity_id} is not null group by primary_id) b
        where a.id= b.primary_id;
        '''.format(scenario_schema=options['scenario_schema'],
                   project_schema=options['project_schema'],
                   end_state_table=options['end_state_table'],
                   config_entity_id=self.config_entity.id)

        execute_sql(pSql)

        # Guard against divide-by-zero when acres_gross is 0
        pSql = '''
        update {scenario_schema}.{end_state_table}
        set developable_proportion = (
            case when acres_gross > 0 then acres_developable / acres_gross else 0 end
        )
        '''.format(scenario_schema=options['scenario_schema'],
                   end_state_table=options['end_state_table'])
        execute_sql(pSql)
def aggregate_within_distance(distance_options):
    """
    For each row of a target table, aggregate source-table columns over all
    source rows whose geometry lies within a given distance, writing the
    results back onto the target table.

    The aggregation runs in-database: a throwaway PL/SQL function
    (``aggregate_within_distance_tool``) does the ``ST_DWithin`` spatial
    aggregation, results are staged in ``{target_table}_{suffix}``, and the
    work is fanned out across one worker thread per CPU core, each thread
    handling a contiguous range of target primary keys.

    :param distance_options: configuration dict. Notable keys:
        - source_table / source_table_query / source_geometry_column
        - target_table_schema / target_table / target_table_pk /
          target_table_query / target_geometry_column
        - distance: search radius (units of the geometry SRID)
        - suffix: name suffix for the staging table and output columns
        - aggregation_type: SQL aggregate to apply, e.g. 'sum' or 'count'
        - variable_field_dict: maps each output column on the target table
          to the list of source columns whose aggregates are added together
          to produce it
    """

    thread_count = count_cores()
    queue = queue_process()

    # Unique set of source columns referenced by any output variable
    source_table_column_list = []

    for key, value in distance_options['variable_field_dict'].items():
        source_table_column_list += value

    source_table_column_list = list(set(source_table_column_list))

    # OUT-parameter declarations / per-column aggregate expressions for the
    # PL/SQL helper function
    sql_format = 'out {formatter} float'.format(formatter="{0}")
    output_field_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    sql_format = 'cast({aggregation_type}({formatter}) as float) as {formatter}_{suffix}'.format(formatter="{0}", **distance_options)
    sql_calculations_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    # (Re)create the helper: drop any previous version first, since its OUT
    # signature depends on the column list and may have changed
    pSql = '''drop function if exists aggregate_within_distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      {output_field_format}) cascade;'''.format(
        output_field_format=output_field_format)

    execute_sql(pSql)

    pSql = '''
    CREATE OR REPLACE FUNCTION aggregate_within_distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      {output_field_format})
    AS
    $$
      select
        $1 as id,
        {sql_calculations_format}

    from (select *, {source_geometry_column} as geometry from {source_table}) ref
        where ST_DWITHIN( $2, ref.geometry, {distance}) and (ref.{source_table_query});
    $$
    COST 10000
    language SQL STABLE strict;
    '''.format(source_table=distance_options['source_table'],
               source_table_query=distance_options['source_table_query'],
               distance=distance_options['distance'],
               source_geometry_column=distance_options['source_geometry_column'],
               output_field_format=output_field_format,
               sql_calculations_format=sql_calculations_format)

    execute_sql(pSql)

    # Staging table holding one aggregated row per target feature
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'], suffix=distance_options['suffix']))

    sql_format = '{formatter}_{suffix} float'.format(formatter="{0}", **distance_options)
    output_table_field_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    pSql = '''create table {target_table_schema}.{target_table}_{suffix} (id int, {output_table_field_format});'''.format(
        target_table_schema=distance_options['target_table_schema'], target_table=distance_options['target_table'],
        suffix=distance_options['suffix'], output_table_field_format=output_table_field_format)

    execute_sql(pSql)

    # Ordered pk list so each worker can take a contiguous id range
    pSql = 'select cast({target_table_pk} as int) from {target_table_schema}.{target_table} where {target_table_query} order by {target_table_pk}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_pk=distance_options['target_table_pk'],
        target_table_query=distance_options['target_table_query'])

    id_list = flatten(report_sql_values(pSql, 'fetchall'))

    # {start_id}/{end_id} are left as placeholders for each worker to fill in
    insert_sql = '''
    insert into {target_table_schema}.{target_table}_{suffix}
      select (f).* from (
          select aggregate_within_distance_tool({target_table_pk}, {target_geometry_column}) as f
          from {target_table_schema}.{target_table}
          where
          {target_table_pk} >= {bottom_range_id} and
          {target_table_pk} <= {top_range_id} and
          {target_table_query}
          offset 0) s
    where (f).id is not null;
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_query=distance_options['target_table_query'],
        target_geometry_column=distance_options['target_geometry_column'],
        source_table=distance_options['source_table'],
        suffix=distance_options['suffix'],
        target_table_pk=distance_options['target_table_pk'],
        bottom_range_id="{start_id}",
        top_range_id="{end_id}")

    # Guard: with no qualifying target rows there is nothing to queue, and
    # indexing id_list below would raise IndexError
    if id_list:
        for i in range(thread_count):
            t = MultithreadProcess(queue, insert_sql)
            # daemon attribute works on py2.6+ and py3 (setDaemon is
            # deprecated and removed in 3.13)
            t.daemon = True
            t.start()

        # populate queue with data; floor division keeps the index an int on
        # both py2 and py3
        rows_per_thread = len(id_list) // thread_count
        offset = 0

        for i in range(thread_count):
            if i == thread_count - 1:
                ## last bucket gets any remainder, too
                last_thread = len(id_list) - 1
            else:
                last_thread = offset + rows_per_thread - 1

            rows_to_process = {
                'start_id': id_list[offset],
                'end_id': id_list[last_thread]
            }
            offset += rows_per_thread
            queue.put(rows_to_process)

        # wait on the queue until everything has been processed
        queue.join()

    add_attribute_idx(distance_options['target_table_schema'],
                      '{target_table}_{suffix}'.format(target_table=distance_options['target_table'],
                                                       suffix=distance_options['suffix']), 'id')

    # Write the staged aggregates back onto the target table: each output
    # column is the sum of its configured source aggregates, with NULL (no
    # source rows within distance) coerced to 0
    if len(distance_options['variable_field_dict']) > 0:
        assignments = []
        for key, value in distance_options['variable_field_dict'].items():
            update_table_field_format = create_sql_calculations(value, '{formatter}_{suffix}'.format(formatter='b.{0}',
                                                                                                     **distance_options), ' + ')
            assignments.append(
                key + ' = ' + "(case when {0} is null then 0 else {0} end)".format(update_table_field_format))
        update_sql_format = ', '.join(assignments)

        pSql = '''
        update {target_table_schema}.{target_table} a set {update_sql_format}
            from (select * from {target_table_schema}.{target_table}_{suffix}) b
            where a.{target_table_pk} = b.id and {target_table_query}
        '''.format(
            target_table_schema=distance_options['target_table_schema'],
            target_table=distance_options['target_table'],
            target_table_query=distance_options['target_table_query'],
            target_table_pk=distance_options['target_table_pk'],
            update_sql_format=update_sql_format,
            suffix=distance_options['suffix']
        )
        execute_sql(pSql)

    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix']))
Пример #29
0
def run_distance_variables_processes(sql_config_dict):
    """
    Compute nearest-feature distance variables on the public health grid.

    First attaches a projected (SRID 3310) ``analysis_geom`` column, backed
    by a GiST index, to every table involved, then runs one
    ``calculate_distance`` pass per destination type (schools, restaurants,
    retail, parks/open space, freeways/arterials, transit stops).
    """

    tables_needing_analysis_geom = [
        (sql_config_dict['uf_canvas_schema'], sql_config_dict['uf_canvas_table']),
        (sql_config_dict['source_grid_schema'], sql_config_dict['source_grid_table']),
        (sql_config_dict['transit_stop_schema'], sql_config_dict['transit_stop_table']),
        (sql_config_dict['public_health_variables_schema'], sql_config_dict['public_health_variables_table'])
    ]

    add_analysis_geom = '''
    alter table {schema}.{table} drop column if exists analysis_geom cascade;
    alter table {schema}.{table} add column analysis_geom geometry;
    update {schema}.{table} set analysis_geom = st_setSRID(st_transform(wkb_geometry, 3310), 3310);
    create index {schema}_{table}_analysis_geom on {schema}.{table} using gist (analysis_geom);'''

    for geom_schema, geom_table in tables_needing_analysis_geom:
        execute_sql(add_analysis_geom.format(schema=geom_schema, table=geom_table))

    # Options common to every distance calculation against the public health
    # variables table; individual runs may override (e.g. maximum_distance)
    shared_options = dict(
        source_geometry_column='analysis_geom',
        target_table_schema=sql_config_dict['public_health_variables_schema'],
        target_table=sql_config_dict['public_health_variables_table'],
        target_table_pk='id',
        target_table_query='pop > 0',
        target_geometry_column='analysis_geom',
        maximum_distance=2000
    )

    uf_canvas = '{0}.{1}'.format(sql_config_dict['uf_canvas_schema'], sql_config_dict['uf_canvas_table'])
    source_grid = '{0}.{1}'.format(sql_config_dict['source_grid_schema'], sql_config_dict['source_grid_table'])
    transit_stops = '{0}.{1}'.format(sql_config_dict['transit_stop_schema'], sql_config_dict['transit_stop_table'])

    # One entry per destination type: where to look, which source rows
    # qualify, and which output column receives the distance
    distance_runs = [
        dict(source_table=uf_canvas,
             source_table_query='emp_education > 0',
             column='school_distance'),
        dict(source_table=uf_canvas,
             source_table_query='emp_restaurant > 0',
             column='restaurant_distance'),
        dict(source_table=uf_canvas,
             source_table_query='emp_retail_services > 0',
             column='retail_distance'),
        dict(source_table=source_grid,
             source_table_query='acres_parcel_park_open_space > 0',
             column='park_open_space_distance'),
        dict(source_table=source_grid,
             source_table_query='freeway_arterial_length_feet > 0',
             maximum_distance=500,
             column='freeway_arterial_any'),
        dict(source_table=transit_stops,
             source_table_query='route_type = 0 or route_type = 1 or route_type = 2 or route_type = 3',
             column='transit_distance'),
    ]

    for run_options in distance_runs:
        calculate_distance(merge(shared_options, run_options))
Пример #30
0
def run_aggregate_within_distance_processes(sql_config_dict):
    """
    Populate neighborhood-aggregate variables on the public health grid.

    Runs three ``aggregate_within_distance`` passes against populated cells
    (building square footage from the UF canvas, street lengths and park
    acreage from the analysis grid, transit stop counts), each over a 1000
    unit radius, then drops the temporary ``analysis_geom`` column and index
    from all participating tables.
    """

    # Options shared by all three aggregation passes
    target_options = dict(
        source_geometry_column='analysis_geom',
        target_table_schema=sql_config_dict['public_health_variables_schema'],
        target_table=sql_config_dict['public_health_variables_table'],
        target_geometry_column='analysis_geom',
        target_table_query='pop > 0',
        target_table_pk='id',
        distance=1000
    )

    # Building square footage by use category, summed from the UF canvas
    aggregate_within_distance(dict(
        target_options,
        source_table='{0}.{1}'.format(sql_config_dict['uf_canvas_schema'], sql_config_dict['uf_canvas_table']),
        source_table_query='du + emp > 0',
        suffix='bldg_sqft',
        aggregation_type='sum',
        variable_field_dict=dict(
            bldg_sqft_res=['bldg_sqft_detsf_sl', 'bldg_sqft_detsf_ll', 'bldg_sqft_attsf', 'bldg_sqft_mf'],
            bldg_sqft_ret1=['bldg_sqft_retail_services', 'bldg_sqft_restaurant', 'bldg_sqft_accommodation',
                            'bldg_sqft_arts_entertainment', 'bldg_sqft_other_services'],
            bldg_sqft_ret=['bldg_sqft_retail_services', 'bldg_sqft_restaurant', 'bldg_sqft_arts_entertainment',
                           'bldg_sqft_other_services'],
            bldg_sqft_off=['bldg_sqft_office_services', 'bldg_sqft_public_admin', 'bldg_sqft_education',
                           'bldg_sqft_medical_services'],
            b1=['bldg_sqft_detsf_sl', 'bldg_sqft_detsf_ll', 'bldg_sqft_attsf', 'bldg_sqft_mf'],
            b2=['bldg_sqft_retail_services', 'bldg_sqft_other_services'],
            b3=['bldg_sqft_restaurant', 'bldg_sqft_arts_entertainment'],
            b4=['bldg_sqft_office_services'],
            b5=['bldg_sqft_public_admin'],
            du_1km_tr=['du'],
            resmix_dens=['acres_parcel_res', 'acres_parcel_mixed_use'],
            far_nonres=['acres_parcel_emp', 'acres_parcel_mixed_use'])
    ))

    # Street lengths and park/open-space acreage, summed from the grid
    aggregate_within_distance(dict(
        target_options,
        source_table='{0}.{1}'.format(sql_config_dict['source_grid_schema'], sql_config_dict['source_grid_table']),
        source_table_query='local_roads_length_feet + secondary_roads_length_feet + freeway_arterial_length_feet + acres_parcel_park_open_space > 0',
        suffix='grid',
        aggregation_type='sum',
        variable_field_dict=dict(
            local_street=['local_roads_length_feet', 'secondary_roads_length_feet'],
            major_street=['freeway_arterial_length_feet'],
            acres_parcel_park_open_space=['acres_parcel_park_open_space'])
    ))

    # Count of transit stops with route_type 0-3
    aggregate_within_distance(dict(
        target_options,
        source_table='{0}.{1}'.format(sql_config_dict['transit_stop_schema'], sql_config_dict['transit_stop_table']),
        source_table_query='route_type = 0 or route_type = 1 or route_type = 2 or route_type = 3',
        suffix='transit',
        aggregation_type='count',
        variable_field_dict=dict(
            transit_count=['wkb_geometry'])
    ))

    # The temporary analysis geometry is no longer needed; remove it from
    # every table it was added to
    cleanup_sql = '''DROP INDEX {schema}.{schema}_{table}_analysis_geom;
            Alter Table {schema}.{table} drop column analysis_geom;'''

    cleanup_tables = [
        (sql_config_dict['uf_canvas_schema'], sql_config_dict['uf_canvas_table']),
        (sql_config_dict['source_grid_schema'], sql_config_dict['source_grid_table']),
        (sql_config_dict['transit_stop_schema'], sql_config_dict['transit_stop_table']),
        (sql_config_dict['public_health_variables_schema'], sql_config_dict['public_health_variables_table'])
    ]

    for geom_schema, geom_table in cleanup_tables:
        execute_sql(cleanup_sql.format(schema=geom_schema, table=geom_table))
Пример #31
0
def populate_grid_census_rate_attributes(sql_config_dict):
    """
    Populate demographic model inputs on the public health variables table
    from census rates.

    Each grid cell is joined to the UF canvas (pop/hh apportioned by
    primary_proportion) and to its census rate record, then a single UPDATE
    writes:
      * absolute totals: pop, hh, intersection_density_sqmi;
      * rate-based model inputs (gender, age, race/ethnicity, employment,
        education, tenure, vehicles, income, disability), each zeroed when
        the cell has no population (or no households for hh-based rates);
      * population counts for the age and adult-income analysis subgroups
        (pop_children .. pop_adult_high), used later as aggregation weights.

    NOTE(review): several disability rates are halved and income brackets
    split in half (e.g. hh_inc_30_40_rate / 2) -- presumably to apportion a
    shared census bracket across two model bins; confirm against the model
    spec.
    """

    # One pass over the grid; subquery b assembles apportioned canvas totals
    # joined to census rates, matched back to the grid on grid_id = a.id
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
        pop = b.pop,
        hh = b.hh,
        intersection_density_sqmi = b.intersection_density_sqmi,
        gender2 = (case when b.pop > 0 then b.pop_female_rate else 0 end),
        age_children = (case when b.pop > 0 then b.pop_avg_age5_11 else 0 end),
        age_teens = (case when b.pop > 0 then b.pop_avg_age12_17 else 0 end),
        age_adult = (case when b.pop > 0 then b.pop_avg_age18_64 else 0 end),
        age_seniors = (case when b.pop > 0 then b.pop_avg_age65_up else 0 end),
        racehisp1 = (case when b.pop > 0 then b.pop_white_rate else 0 end),
        racehisp2 = (case when b.pop > 0 then b.pop_black_rate else 0 end),
        racehisp4 = (case when b.pop > 0 then b.pop_asian_rate else 0 end),
        racehisp97 = (case when b.pop > 0 then b.pop_american_indian_rate + b.pop_hawaiian_islander_rate + b.pop_other_ethnicity_rate else 0 end),
        emply2 = (case when b.pop > 0 then b.pop_age16_up_rate * b.pop_unemployed_rate else 0 end),
        educa2 = (case when b.pop > 0 then b.pop_hs_diploma_rate else 0 end),
        educa3 = (case when b.pop > 0 then b.pop_assoc_some_coll_rate else 0 end),
        educa4 = (case when b.pop > 0 then b.pop_coll_degree_rate else 0 end),
        educa5 = (case when b.pop > 0 then b.pop_grad_degree_rate else 0 end),
        own2 = (case when b.hh > 0 then b.hh_rent_occ_rate else 0 end),
        hhveh = (case when b.hh > 0 then b.hh_agg_veh_rate else 0 end),
        hhsize = (case when b.hh > 0 then b.pop / b.hh else 0 end),
        incom2 = (case when b.hh > 0 then b.hh_inc_10_20_rate + b.hh_inc_20_30_rate + b.hh_inc_30_40_rate / 2  else 0 end),
        incom3 = (case when b.hh > 0 then hh_inc_30_40_rate / 2 + b.hh_inc_40_50_rate else 0 end),
        incom4 = (case when b.hh > 0 then hh_inc_50_60_rate + b.hh_inc_60_75_rate else 0 end),
        incom5 = (case when b.hh > 0 then hh_inc_75_100_rate else 0 end),
        incom6 = (case when b.hh > 0 then hh_inc_100_125_rate + b.hh_inc_125_150_rate else 0 end),
        incom7 = (case when b.hh > 0 then hh_inc_150_200_rate + b.hh_inc_200p_rate else 0 end),
        child_any1 = (case when b.hh > 0 then b.hh_with_children_under_18yr_rate else 0 end),
        disabled1_children = (case when b.pop > 0 then (b.pop_age5_17_disability_rate - b.pop_age5_17_ambulatory_disability_rate) / 2 else 0 end),
        disabled2_children = (case when b.pop > 0 then b.pop_age5_17_ambulatory_disability_rate / 2 else 0 end),
        disabled1_teens = (case when b.pop > 0 then (b.pop_age5_17_disability_rate - b.pop_age5_17_ambulatory_disability_rate) / 2 else 0 end),
        disabled2_teens = (case when b.pop > 0 then b.pop_age5_17_ambulatory_disability_rate / 2 else 0 end),
        disabled1_adult = (case when b.pop > 0 then b.pop_age18_64_disability_rate - b.pop_age18_64_ambulatory_disability_rate else 0 end),
        disabled2_adult = (case when b.pop > 0 then b.pop_age18_64_ambulatory_disability_rate else 0 end),
        disabled1_seniors = (case when b.pop > 0 then b.pop_age65up_disability_rate - b.pop_age65up_ambulatory_disability_rate else 0 end),
        disabled2_seniors = (case when b.pop > 0 then b.pop_age65up_ambulatory_disability_rate else 0 end),
        emply_hh = (case when b.hh > 0 then b.pop / b.hh * b.pop_employed_rate else 0 end),
        educa_hh2 = (case when b.pop > 0 then b.pop_hs_diploma_rate else 0 end),
        educa_hh3 = (case when b.pop > 0 then b.pop_assoc_some_coll_rate else 0 end),
        educa_hh4 = (case when b.pop > 0 then b.pop_coll_degree_rate else 0 end),
        educa_hh5 = (case when b.pop > 0 then b.pop_grad_degree_rate else 0 end),
        pop_age_children = (case when b.pop > 0 then pop_age_5_11_rate else 0 end),
        pop_age_teens = (case when b.pop > 0 then pop_age_12_17_rate else 0 end),
        pop_age_adult = (case when b.pop > 0 then pop_age_18_64_rate else 0 end),
        pop_age_seniors = (case when b.pop > 0 then pop_age65_up_rate else 0 end),

        -- unlike the fields above, these describe population counts for the analysis groups
        pop_children = (case when b.pop > 0 then b.pop * pop_age_5_11_rate else 0 end),
        pop_teen = (case when b.pop > 0 then b.pop * pop_age_12_17_rate else 0 end),
        pop_senior = (case when b.pop > 0 then b.pop * pop_age65_up_rate else 0 end),
        pop_adult = (case when b.pop > 0 then b.pop * pop_age_18_64_rate  else 0 end),

        pop_adult_low = (case when b.pop > 0 then b.pop * pop_age_18_64_rate * (hh_inc_00_10_rate + hh_inc_10_20_rate + hh_inc_20_30_rate + hh_inc_30_40_rate + hh_inc_40_50_rate) else 0 end),
        pop_adult_med = (case when b.pop > 0 then b.pop * pop_age_18_64_rate * (hh_inc_50_60_rate + hh_inc_60_75_rate + hh_inc_75_100_rate) else 0 end),
        pop_adult_high = (case when b.pop > 0 then b.pop * pop_age_18_64_rate * (hh_inc_100_125_rate + hh_inc_125_150_rate + hh_inc_150_200_rate + hh_inc_200p_rate) else 0 end)

    from (
        select a.grid_id, pop, hh, intersection_density_sqmi, c.*
        from (select grid_id, census_id, sum(pop * primary_proportion) as pop, sum(hh * primary_proportion) as hh,
        avg(intersection_density_sqmi) as intersection_density_sqmi
              from {public_health_variables_schema}.{source_grid_table}_join a
              left join {uf_canvas_schema}.{uf_canvas_table} b on a.primary_id = b.id group by grid_id, census_id) a
        LEFT JOIN {census_rate_schema}.{census_rate_table} c on a.census_id = c.id
    ) b where b.grid_id = a.id;
    '''.format(**sql_config_dict)

    execute_sql(pSql)
Пример #32
0
    def update(self, **kwargs):
        """
        Apply environmental constraints to the active scenario's end state.

        For each current constraint layer, computes constrained acreage per
        geography (acres * constraint percent), honoring constraint priority
        so that overlapping higher-priority layers are not double counted;
        then subtracts the total constrained acreage from gross acres to set
        acres_developable and developable_proportion on the end state table.
        """

        logger.debug("Executing Environmental Constraints using {0}".format(self.config_entity))
        config_entity = self.config_entity
        end_state_feature_class = config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
        base_table = config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)

        options = dict(
            project_schema=parse_schema_and_table(base_table._meta.db_table)[0],
            scenario_schema=parse_schema_and_table(end_state_feature_class._meta.db_table)[0],
            end_state_table=end_state_feature_class.db_entity_key
        )

        current_db_entities, db_entities_to_add, db_entities_to_delete = \
            self.update_or_create_environmental_constraint_percents(config_entity)

        # Collect key/priority/percent for every active constraint layer
        current_environmental_constraints = []
        logger.debug("Current db_entities {0}".format(current_db_entities))
        for db_entity in current_db_entities:
            logger.debug("Active db_entity {0}".format(db_entity.key))
            constraint_class = config_entity.db_entity_feature_class(db_entity.key)
            environmental_constraint_percent = EnvironmentalConstraintPercent.objects.filter(
                db_entity_id=db_entity.id,
                analysis_tool_id=self.id)[0]
            current_environmental_constraints.append(
                dict(
                    key=constraint_class.db_entity_key,
                    priority=environmental_constraint_percent.priority,
                    percent=environmental_constraint_percent.percent
                )
            )

        # Ensure a per-config-entity acreage column exists on the unioned
        # constraint geography table (no-op if it was added previously)
        pSql = '''
        DO $$
            BEGIN
                BEGIN
                    ALTER TABLE {project_schema}.environmental_constraint_geographies_table_unioned ADD COLUMN constraint_acres_{config_entity_id} float;
                EXCEPTION
                    WHEN duplicate_column
                        THEN -- do nothing;
                END;
            END;
        $$'''.format(
            project_schema=options['project_schema'],
            config_entity_id=self.config_entity.id
        )
        execute_sql(pSql)

        logger.info('Calculate constraint acreage for the active scenario end state feature')
        for db_entity in current_db_entities:
            constraint_class = self.config_entity.db_entity_feature_class(db_entity.key)
            environmental_constraint_percent = EnvironmentalConstraintPercent.objects.filter(
                db_entity_id=db_entity.id,
                analysis_tool_id=self.id)[0]
            constraint_percent = environmental_constraint_percent.percent
            # List comprehensions instead of filter(...)[0]: a filter object
            # is not subscriptable on Python 3 (behavior identical on 2)
            active_constraint = [dct for dct in current_environmental_constraints
                                 if constraint_class.db_entity_key in dct['key']][0]
            # Constraints that take precedence: strictly higher priority, or
            # equal priority with a larger percent
            priority_constraints = [dct for dct in current_environmental_constraints
                                    if dct['priority'] < active_constraint['priority'] or
                                    (dct['priority'] == active_constraint['priority'] and
                                     dct['percent'] > active_constraint['percent'])]

            priority_key_list = []
            for constraint in priority_constraints:
                priority_key_list.append(constraint['key'])

            # Exclude geographies already claimed by a precedent constraint
            priority_query = create_sql_calculations(priority_key_list, ' and {0}_id is null', ' and a.primary_id is not null')

            pSql = '''
            update {project_schema}.environmental_constraint_geographies_table_unioned a set
                constraint_acres_{config_entity_id} = acres * {percent} where {constraint}_id = {constraint_id} {priority_query};
            '''.format(
                project_schema=options['project_schema'],
                constraint=constraint_class.db_entity_key,
                constraint_id=db_entity.id,
                percent=constraint_percent,
                priority_query=priority_query,
                config_entity_id=self.config_entity.id
            )

            execute_sql(pSql)

        # Roll constrained acreage up per primary geography and subtract it
        # from gross acres
        pSql = '''
        update {scenario_schema}.{end_state_table} a set
            acres_developable = a.acres_gross - b.constraint_acres
            FROM
            (select primary_id,
                    sum(constraint_acres_{config_entity_id}) as constraint_acres
                from {project_schema}.environmental_constraint_geographies_table_unioned
                    where constraint_acres_{config_entity_id} is not null group by primary_id) b
        where a.id= b.primary_id;
        '''.format(
            scenario_schema=options['scenario_schema'],
            project_schema=options['project_schema'],
            end_state_table=options['end_state_table'],
            config_entity_id=self.config_entity.id
        )

        execute_sql(pSql)

        pSql = '''
        update {scenario_schema}.{end_state_table}
        set developable_proportion = (
            case when acres_gross > 0 then acres_developable / acres_gross else 0 end
        )
        '''.format(
            scenario_schema=options['scenario_schema'],
            end_state_table=options['end_state_table']
        )
        execute_sql(pSql)
Пример #33
0
def populate_transformations_and_indices(sql_config_dict):
    """
    Transform raw public health variables and compute the composite indices
    used by the UD4H models, for all populated cells (pop > 0).

    Pipeline (each stage is one in-database UPDATE):
      1. Variable transformations -- log/power transforms; note that ``^`` is
         the Postgres power operator. Zero b1..b5 values are replaced with a
         tiny epsilon (1E-24) so the entropy terms ln(b_i/a) below stay
         defined.
      2. du_1km_tr transform and a = b1+..+b5 (land-use mix denominator).
      3. mix5 -- 5-category entropy land-use mix index, clamped to 0 outside
         (0.0001, 0.999).
      4. Preliminary indices (res_index, com_index, park_access, ...) as
         weighted sums of standardized variables.
      5. walk_index from the preliminary indices, then the per-model
         walk_index_* variants.

    NOTE(review): the numeric constants look like standardization means and
    deviations plus model weights from the UD4H estimation data -- they are
    not derivable from this file; confirm against the model documentation.
    """

    ##TRANSFORMING PRELIMINARY AND MODEL VARIABLES STEPS 1 AND 2
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
        resmix_dens = (case
          when resmix_dens = 0
            then 0
          else ln((du_1km_tr / resmix_dens) + 1) end),
        bldg_sqft_ret = bldg_sqft_ret ^ 0.25,
        b1 = (case when b1 = 0 then 1E-24 else b1 end),
        b2 = (case when b2 = 0 then 1E-24 else b2 end),
        b3 = (case when b3 = 0 then 1E-24 else b3 end),
        b4 = (case when b4 = 0 then 1E-24 else b4 end),
        b5 = (case when b5 = 0 then 1E-24 else b5 end),
        local_street = local_street / 5280,
        major_street = (major_street / 5280) ^ 0.5,
        far_nonres = (case when far_nonres = 0 then 0 else ((bldg_sqft_ret1 + bldg_sqft_off) / (far_nonres * 43560)) ^ 0.25 end),
        rail_any = (case when rail_any > 0 then 1 else 0 end),
        freeway_arterial_any = (case when freeway_arterial_any <500 then 1 else 0 end),
        transit_distance = transit_distance ^ 0.25,
        transit_count = transit_count ^ 0.5,
        school_distance = school_distance ^ 0.25,
        retail_distance = retail_distance ^ 0.5,
        intersection_density_sqmi = intersection_density_sqmi ^ 0.5,
        park_open_space_distance = park_open_space_distance ^ 0.5,
        acres_parcel_park_open_space = acres_parcel_park_open_space ^ 0.25,
        du_variable = du_variable ^ 0.25,
        emp_variable = emp_variable ^ 0.25
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)

    # Second pass: depends on the resmix_dens transform above having already
    # consumed the raw du_1km_tr value
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
    du_1km_tr = du_1km_tr ^ 0.25,
    a = b1 + b2 + b3 + b4 + b5
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)

    # Entropy-based 5-way land-use mix, normalized by ln(5)
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
    mix5 = (case
          when a > 0 and ((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a)) / (-1 * ln(5)) < 0.0001
            then 0
          when a > 0 and ((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a)) / (-1 * ln(5)) > 0.999
            then 0
          when a > 0 and ((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a)) / (-1 * ln(5)) > 0.0001 and
            ((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a)) / (-1 * ln(5)) < 0.999
            then ((((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a))) / (-1 * ln(5))) ^ 0.5
          else 0
          end)
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)

    ##CALCULATING PRELIMINARY INDEX VARIABLES

    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
        res_index = (resmix_dens - 1.82) / 0.9478 + (du_1km_tr - 7.451) / 1.8664,
        com_index = 2 * ((bldg_sqft_ret - 24.38) / 10.4543) + 2 * ((far_nonres - 0.5938) / 0.2064) - (retail_distance - 13.91) / 13.5014 - (restaurant_distance - 599.4) / 670.6584,
        park_access = (acres_parcel_park_open_space - 1.704) / 1.0956 - (park_open_space_distance - 18.48) / 11.3701,
        regional_access = (du_variable - 24.92) / 6.0667 + (emp_variable - 26.13) / 7.2701,
        network_index = (intersection_density_sqmi - 9.186) / 3.5870 + (local_street - 29.73) / 10.5599,
        transit_access = 2*((transit_count - 3.769) / 3.9104) - (transit_distance - 4.602) / 1.8653,
        major_road_index = (major_street - 0.6099) / 0.8575 + (freeway_arterial_any - 0.2126) / 0.4092
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)


    ##CALCULATING WALK INDEX VARIABLE
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
        walk_index = 2 * ((res_index - 0.000221) / 1.9586) + 1.5 * ((network_index - 1.65E-16) / 1.846573714) + (com_index - 0.0005076) / 5.3122 + 0.5*((mix5 - 0.4379) / 0.1840)
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)


    ##CALCULATING WALK INDEX VARIATIONS
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
      walk_index_chts_senior_walk_any = (12.69/6.59) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_senior_auto_min = (7.09/2.63) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_teens_walk_any = (8.24/3.34) * ((walk_index - 0.0005914) / 4.4684) + (4.91/3.34) * ((transit_access - 2.727E-16) / 2.8399) + (rail_any - 0.09589) / 0.2944,
      walk_index_chts_child_walk_any = (8.94/5.81) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_adult_bike_any = (8.22/2.19) * ((walk_index - 0.0005914) / 4.4684) + (regional_access) / 1.9944,
      walk_index_chts_adult_walk_min = (9.79/5.56) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_senior_walk_min = (2.43/2.34) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_teens_walk_min = (5.12/2.42) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_adult_auto_min = (7.99/2.11) * ((walk_index - 0.0005914) / 4.4684) + (rail_any - 0.09589) / 0.2944,
      walk_index_chis_adult_modPA_any = (8.57/2.94) * ((walk_index - 0.0005914) / 4.4684) + (4.36/2.94) * ((transit_access - 2.727E-16) / 2.8399) + ((rail_any - 0.09589) / 0.2944) + (3.22/2.94) * ((regional_access) / 1.9944),
      walk_index_chis_adult_overweight = (walk_index - 0.0005914) / 4.4684 + (7.92/3.15) * ((regional_access) / 1.9944),
      walk_index_chis_senior_overweight = (walk_index - 0.0005914) / 4.4684 + (3.15/2.01) * ((regional_access) / 1.9944),
      walk_index_chis_adult_obese = (walk_index - 0.0005914) / 4.4684 + (6.07/2.58) * ((regional_access) / 1.9944),
      walk_index_chis_senior_gh = (2.74/2.14) * ((walk_index - 0.0005914) / 4.4684) + (regional_access) / 1.9944,
      walk_index_chis_senior_walk_le_min = (2.82/2.08) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chis_adult_modPA_min = (7.96/1.98) * ((walk_index - 0.0005914) / 4.4684) + (4.32/1.98) * ((transit_access - 2.727E-16) / 2.8399) + (rail_any - 0.09589) / 0.2944,
      walk_index_chis_adult_bmi = (walk_index - 0.0005914) / 4.4684 + (4.47/3.50) * ((transit_access - 2.727E-16) / 2.8399) + (8.94/3.50) * ((regional_access) / 1.9944),
      walk_index_chis_child_PA60 =(4.25/2.12) * ((walk_index - 0.0005914) / 4.4684) + (regional_access) / 1.9944,
      walk_index_chis_child_walk_school = (8.13/3.17)*((walk_index - 0.0005914) / 4.4684) + (regional_access) / 1.9944

    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)
Пример #34
0
def run_aggregate_within_distance_processes(sql_config_dict):
    """
    Run the three 1-km buffer aggregations that feed the public health
    variables table, then drop the temporary analysis geometry columns
    (and their indexes) from every table that received one.
    """

    def _distance_options(source_schema_key, source_table_key, source_query,
                          suffix, aggregation_type, variable_field_dict):
        # Every pass shares the same target table, geometry columns,
        # population filter and 1000m radius; only the source side varies.
        return dict(
            source_table=sql_config_dict[source_schema_key] + '.' +
            sql_config_dict[source_table_key],
            source_table_query=source_query,
            source_geometry_column='analysis_geom',
            target_table_schema=sql_config_dict[
                'public_health_variables_schema'],
            target_table=sql_config_dict['public_health_variables_table'],
            target_geometry_column='analysis_geom',
            target_table_query='pop > 0',
            target_table_pk='id',
            distance=1000,
            suffix=suffix,
            aggregation_type=aggregation_type,
            variable_field_dict=variable_field_dict)

    # Pass 1: sum building square footage by use category from the UF canvas.
    aggregate_within_distance(_distance_options(
        'uf_canvas_schema', 'uf_canvas_table', 'du + emp > 0',
        'bldg_sqft', 'sum',
        dict(
            bldg_sqft_res=[
                'bldg_sqft_detsf_sl', 'bldg_sqft_detsf_ll',
                'bldg_sqft_attsf', 'bldg_sqft_mf'
            ],
            bldg_sqft_ret1=[
                'bldg_sqft_retail_services', 'bldg_sqft_restaurant',
                'bldg_sqft_accommodation', 'bldg_sqft_arts_entertainment',
                'bldg_sqft_other_services'
            ],
            bldg_sqft_ret=[
                'bldg_sqft_retail_services', 'bldg_sqft_restaurant',
                'bldg_sqft_arts_entertainment', 'bldg_sqft_other_services'
            ],
            bldg_sqft_off=[
                'bldg_sqft_office_services', 'bldg_sqft_public_admin',
                'bldg_sqft_education', 'bldg_sqft_medical_services'
            ],
            b1=[
                'bldg_sqft_detsf_sl', 'bldg_sqft_detsf_ll',
                'bldg_sqft_attsf', 'bldg_sqft_mf'
            ],
            b2=['bldg_sqft_retail_services', 'bldg_sqft_other_services'],
            b3=['bldg_sqft_restaurant', 'bldg_sqft_arts_entertainment'],
            b4=['bldg_sqft_office_services'],
            b5=['bldg_sqft_public_admin'],
            du_1km_tr=['du'],
            resmix_dens=['acres_parcel_res', 'acres_parcel_mixed_use'],
            far_nonres=['acres_parcel_emp', 'acres_parcel_mixed_use'])))

    # Pass 2: sum street lengths and park/open-space acreage from the grid.
    aggregate_within_distance(_distance_options(
        'source_grid_schema', 'source_grid_table',
        'local_roads_length_feet + secondary_roads_length_feet + freeway_arterial_length_feet + acres_parcel_park_open_space > 0',
        'grid', 'sum',
        dict(
            local_street=[
                'local_roads_length_feet', 'secondary_roads_length_feet'
            ],
            major_street=['freeway_arterial_length_feet'],
            acres_parcel_park_open_space=['acres_parcel_park_open_space'])))

    # Pass 3: count nearby transit stops (route types 0-3).
    aggregate_within_distance(_distance_options(
        'transit_stop_schema', 'transit_stop_table',
        'route_type = 0 or route_type = 1 or route_type = 2 or route_type = 3',
        'transit', 'count',
        dict(transit_count=['wkb_geometry'])))

    # The analysis geometry columns were only needed for the passes above;
    # remove them and their GiST indexes from every participating table.
    for schema_key, table_key in (
            ('uf_canvas_schema', 'uf_canvas_table'),
            ('source_grid_schema', 'source_grid_table'),
            ('transit_stop_schema', 'transit_stop_table'),
            ('public_health_variables_schema',
             'public_health_variables_table')):
        schema = sql_config_dict[schema_key]
        table = sql_config_dict[table_key]
        execute_sql('''DROP INDEX {schema}.{schema}_{table}_analysis_geom;
            Alter Table {schema}.{table} drop column analysis_geom;'''.format(
            schema=schema, table=table))
Пример #35
0
def populate_grid_census_rate_attributes(sql_config_dict):
    """
    Copy census-rate demographics onto the public health variables grid.

    The inner subquery joins the grid to the UF canvas through the grid
    join table (weighting pop/hh by primary_proportion, averaging
    intersection density), then LEFT JOINs the census-rate table on
    census_id.  The outer UPDATE fills the model-input rate fields
    (each guarded by pop > 0 or hh > 0, defaulting to 0) and the
    analysis-group population counts (children/teen/adult/senior, plus
    adult counts split by household income band).
    """

    # NOTE(review): inside the SQL, some rate columns are referenced with
    # the "b." alias and some without (e.g. incom3..incom7 and the
    # pop_age_*_rate fields).  Unqualified names appear to resolve to the
    # joined subquery "b", but confirm none are ambiguous with columns of
    # the target table alias "a".
    # NOTE(review): incom2/incom3 split the 30-40k income bracket in half
    # across the two bins ("hh_inc_30_40_rate / 2") — presumably
    # intentional binning; verify against the model's income categories.
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
        pop = b.pop,
        hh = b.hh,
        intersection_density_sqmi = b.intersection_density_sqmi,
        gender2 = (case when b.pop > 0 then b.pop_female_rate else 0 end),
        age_children = (case when b.pop > 0 then b.pop_avg_age5_11 else 0 end),
        age_teens = (case when b.pop > 0 then b.pop_avg_age12_17 else 0 end),
        age_adult = (case when b.pop > 0 then b.pop_avg_age18_64 else 0 end),
        age_seniors = (case when b.pop > 0 then b.pop_avg_age65_up else 0 end),
        racehisp1 = (case when b.pop > 0 then b.pop_white_rate else 0 end),
        racehisp2 = (case when b.pop > 0 then b.pop_black_rate else 0 end),
        racehisp4 = (case when b.pop > 0 then b.pop_asian_rate else 0 end),
        racehisp97 = (case when b.pop > 0 then b.pop_american_indian_rate + b.pop_hawaiian_islander_rate + b.pop_other_ethnicity_rate else 0 end),
        emply2 = (case when b.pop > 0 then b.pop_age16_up_rate * b.pop_unemployed_rate else 0 end),
        educa2 = (case when b.pop > 0 then b.pop_hs_diploma_rate else 0 end),
        educa3 = (case when b.pop > 0 then b.pop_assoc_some_coll_rate else 0 end),
        educa4 = (case when b.pop > 0 then b.pop_coll_degree_rate else 0 end),
        educa5 = (case when b.pop > 0 then b.pop_grad_degree_rate else 0 end),
        own2 = (case when b.hh > 0 then b.hh_rent_occ_rate else 0 end),
        hhveh = (case when b.hh > 0 then b.hh_agg_veh_rate else 0 end),
        hhsize = (case when b.hh > 0 then b.pop / b.hh else 0 end),
        incom2 = (case when b.hh > 0 then b.hh_inc_10_20_rate + b.hh_inc_20_30_rate + b.hh_inc_30_40_rate / 2  else 0 end),
        incom3 = (case when b.hh > 0 then hh_inc_30_40_rate / 2 + b.hh_inc_40_50_rate else 0 end),
        incom4 = (case when b.hh > 0 then hh_inc_50_60_rate + b.hh_inc_60_75_rate else 0 end),
        incom5 = (case when b.hh > 0 then hh_inc_75_100_rate else 0 end),
        incom6 = (case when b.hh > 0 then hh_inc_100_125_rate + b.hh_inc_125_150_rate else 0 end),
        incom7 = (case when b.hh > 0 then hh_inc_150_200_rate + b.hh_inc_200p_rate else 0 end),
        child_any1 = (case when b.hh > 0 then b.hh_with_children_under_18yr_rate else 0 end),
        disabled1_children = (case when b.pop > 0 then (b.pop_age5_17_disability_rate - b.pop_age5_17_ambulatory_disability_rate) / 2 else 0 end),
        disabled2_children = (case when b.pop > 0 then b.pop_age5_17_ambulatory_disability_rate / 2 else 0 end),
        disabled1_teens = (case when b.pop > 0 then (b.pop_age5_17_disability_rate - b.pop_age5_17_ambulatory_disability_rate) / 2 else 0 end),
        disabled2_teens = (case when b.pop > 0 then b.pop_age5_17_ambulatory_disability_rate / 2 else 0 end),
        disabled1_adult = (case when b.pop > 0 then b.pop_age18_64_disability_rate - b.pop_age18_64_ambulatory_disability_rate else 0 end),
        disabled2_adult = (case when b.pop > 0 then b.pop_age18_64_ambulatory_disability_rate else 0 end),
        disabled1_seniors = (case when b.pop > 0 then b.pop_age65up_disability_rate - b.pop_age65up_ambulatory_disability_rate else 0 end),
        disabled2_seniors = (case when b.pop > 0 then b.pop_age65up_ambulatory_disability_rate else 0 end),
        emply_hh = (case when b.hh > 0 then b.pop / b.hh * b.pop_employed_rate else 0 end),
        educa_hh2 = (case when b.pop > 0 then b.pop_hs_diploma_rate else 0 end),
        educa_hh3 = (case when b.pop > 0 then b.pop_assoc_some_coll_rate else 0 end),
        educa_hh4 = (case when b.pop > 0 then b.pop_coll_degree_rate else 0 end),
        educa_hh5 = (case when b.pop > 0 then b.pop_grad_degree_rate else 0 end),
        pop_age_children = (case when b.pop > 0 then pop_age_5_11_rate else 0 end),
        pop_age_teens = (case when b.pop > 0 then pop_age_12_17_rate else 0 end),
        pop_age_adult = (case when b.pop > 0 then pop_age_18_64_rate else 0 end),
        pop_age_seniors = (case when b.pop > 0 then pop_age65_up_rate else 0 end),

        -- unlike the fields above, these describe population counts for the analysis groups
        pop_children = (case when b.pop > 0 then b.pop * pop_age_5_11_rate else 0 end),
        pop_teen = (case when b.pop > 0 then b.pop * pop_age_12_17_rate else 0 end),
        pop_senior = (case when b.pop > 0 then b.pop * pop_age65_up_rate else 0 end),
        pop_adult = (case when b.pop > 0 then b.pop * pop_age_18_64_rate  else 0 end),

        pop_adult_low = (case when b.pop > 0 then b.pop * pop_age_18_64_rate * (hh_inc_00_10_rate + hh_inc_10_20_rate + hh_inc_20_30_rate + hh_inc_30_40_rate + hh_inc_40_50_rate) else 0 end),
        pop_adult_med = (case when b.pop > 0 then b.pop * pop_age_18_64_rate * (hh_inc_50_60_rate + hh_inc_60_75_rate + hh_inc_75_100_rate) else 0 end),
        pop_adult_high = (case when b.pop > 0 then b.pop * pop_age_18_64_rate * (hh_inc_100_125_rate + hh_inc_125_150_rate + hh_inc_150_200_rate + hh_inc_200p_rate) else 0 end)

    from (
        select a.grid_id, pop, hh, intersection_density_sqmi, c.*
        from (select grid_id, census_id, sum(pop * primary_proportion) as pop, sum(hh * primary_proportion) as hh,
        avg(intersection_density_sqmi) as intersection_density_sqmi
              from {public_health_variables_schema}.{source_grid_table}_join a
              left join {uf_canvas_schema}.{uf_canvas_table} b on a.primary_id = b.id group by grid_id, census_id) a
        LEFT JOIN {census_rate_schema}.{census_rate_table} c on a.census_id = c.id
    ) b where b.grid_id = a.id;
    '''.format(**sql_config_dict)

    execute_sql(pSql)
Пример #36
0
def run_distance_variables_processes(sql_config_dict):
    """
    Prepare projected analysis geometries on every participating table,
    then compute nearest-feature distance columns on the public health
    variables table for schools, restaurants, retail, parks, freeways
    and transit stops.
    """
    participating_tables = (
        ('uf_canvas_schema', 'uf_canvas_table'),
        ('source_grid_schema', 'source_grid_table'),
        ('transit_stop_schema', 'transit_stop_table'),
        ('public_health_variables_schema', 'public_health_variables_table'))

    # Add a projected (EPSG:3310) geometry column plus a GiST index so the
    # distance queries below can run in meters with index support.
    add_analysis_geom = '''
    alter table {schema}.{table} drop column if exists analysis_geom cascade;
    alter table {schema}.{table} add column analysis_geom geometry;
    update {schema}.{table} set analysis_geom = st_setSRID(st_transform(wkb_geometry, 3310), 3310);
    create index {schema}_{table}_analysis_geom on {schema}.{table} using gist (analysis_geom);'''

    for schema_key, table_key in participating_tables:
        execute_sql(add_analysis_geom.format(
            schema=sql_config_dict[schema_key],
            table=sql_config_dict[table_key]))

    # Options shared by every calculate_distance() pass.
    base_options = dict(
        source_geometry_column='analysis_geom',
        target_table_schema=sql_config_dict['public_health_variables_schema'],
        target_table=sql_config_dict['public_health_variables_table'],
        target_table_pk='id',
        target_table_query='pop > 0',
        target_geometry_column='analysis_geom',
        maximum_distance=2000)

    # (source schema key, source table key, source filter,
    #  output column, maximum_distance override or None)
    distance_specs = (
        ('uf_canvas_schema', 'uf_canvas_table',
         'emp_education > 0', 'school_distance', None),
        ('uf_canvas_schema', 'uf_canvas_table',
         'emp_restaurant > 0', 'restaurant_distance', None),
        ('uf_canvas_schema', 'uf_canvas_table',
         'emp_retail_services > 0', 'retail_distance', None),
        ('source_grid_schema', 'source_grid_table',
         'acres_parcel_park_open_space > 0', 'park_open_space_distance',
         None),
        ('source_grid_schema', 'source_grid_table',
         'freeway_arterial_length_feet > 0', 'freeway_arterial_any', 500),
        ('transit_stop_schema', 'transit_stop_table',
         'route_type = 0 or route_type = 1 or route_type = 2 or route_type = 3',
         'transit_distance', None))

    for schema_key, table_key, source_query, column, max_distance in \
            distance_specs:
        overrides = dict(
            source_table=sql_config_dict[schema_key] + '.' +
            sql_config_dict[table_key],
            source_table_query=source_query,
            column=column)
        if max_distance is not None:
            overrides['maximum_distance'] = max_distance
        calculate_distance(merge(base_options, overrides))
Пример #37
0
    def update(self, **kwargs):

        """
        Create or refresh the environmental constraint geographies for the
        active config entity.

        Every DbEntity carrying the 'environmental_constraint' behavior is
        intersected with the base canvas and staged into one table with a
        layer-id column per constraint; the staged shapes are then
        planarized into a non-overlapping "unioned" table (with acreage)
        which is finally reprojected to 4326 for web display.
        """
        # TODO : remove hard-coded 3310 (only works in CA), need to set an "analysis projection" in the Region
        start_time = time.time()

        # All DbEntities in this config entity that behave as environmental
        # constraints.
        current_db_entities = \
            set(self.config_entity.db_entities_having_behavior_key(BehaviorKey.Fab.ricate('environmental_constraint')))

        base_feature_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.BASE_CANVAS)

        options = dict(
            project_schema=parse_schema_and_table(base_feature_class._meta.db_table)[0],
            base_table=base_feature_class.db_entity_key
        )

        logger.info('Inserting raw geographies into the environmental constraint geographies table for DbEntities: %s' % \
                    ', '.join(map(lambda db_entity: db_entity.name, current_db_entities)))

        # Rebuild the staging table from scratch on every run.
        drop_table('{project_schema}.environmental_constraint_geographies_table'.format(
            project_schema=options['project_schema'])
        )

        current_environmental_constraints = []
        for db_entity in current_db_entities:
            constraint_class = self.config_entity.db_entity_feature_class(db_entity.key)
            current_environmental_constraints.append(constraint_class.db_entity_key)

        # One "<key>_id int" column definition per constraint layer for the
        # CREATE, and the bare column names for the later INSERT/SELECTs.
        create_id_field_format = create_sql_calculations(current_environmental_constraints, '{0}_id int')
        insert_id_field_format = create_sql_calculations(current_environmental_constraints, '{0}_id')

        pSql = '''
        create table {project_schema}.environmental_constraint_geographies_table
            (primary_id integer, wkb_geometry geometry, {create_id_field_format});
        SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table', 'wkb_geometry', 3310)

        '''.format(project_schema=options['project_schema'], create_id_field_format=create_id_field_format)

        execute_sql(pSql)

        # Intersect each constraint layer with the base canvas and stage the
        # results: geometries are dumped to single parts, buffered by 0 (a
        # common PostGIS trick to clean invalid polygons) and reprojected
        # to 3310.
        for db_entity in current_db_entities:
            logger.info('Inserting into environmental constraint geographies table for DbEntity: %s' % db_entity.full_name)

            constraint_class = self.config_entity.db_entity_feature_class(db_entity.key)

            pSql = '''
                insert into {project_schema}.environmental_constraint_geographies_table (primary_id, wkb_geometry, {constraint_db_entity_key}_id) select
                    cast(primary_id as int), wkb_geometry, {constraint_db_entity_key}_id from (
                    select
                        id as primary_id,
                        {constraint_db_entity_id} as {constraint_db_entity_key}_id,
                        st_setSRID(st_transform(st_buffer((st_dump(wkb_geometry)).geom, 0), 3310), 3310) as wkb_geometry

                    from (
                        select b.id, st_intersection(a.wkb_geometry, b.wkb_geometry) as wkb_geometry
	                    from {constraint_schema}.{constraint_db_entity_key} a,
                        {project_schema}.{base_table} b
                            where st_intersects(a.wkb_geometry, b.wkb_geometry)) as intersection
                    ) as polygons;
                '''.format(
                project_schema=options['project_schema'],
                base_table=options['base_table'],
                constraint_schema=parse_schema_and_table(constraint_class._meta.db_table)[0],
                constraint_db_entity_key=constraint_class.db_entity_key,
                constraint_db_entity_id=db_entity.id
            )

            execute_sql(pSql)

            logger.info('finished inserting db_entity: {db_entity} {time} elapsed'.format(
                time=time.time() - start_time,
                db_entity=constraint_class.db_entity_key))

        #only regenerate the merged environmental constraint whenever an envrionmental constraint is added or removed
        # from the layer

        add_geom_idx(options['project_schema'], 'environmental_constraint_geographies_table')

        logger.info('Unioning all environmental constraint geographies')
        drop_table('{project_schema}.environmental_constraint_geographies_table_unioned'.format(
            project_schema=options['project_schema'])
        )

        pSql = '''
            CREATE TABLE {project_schema}.environmental_constraint_geographies_table_unioned
                (id serial, wkb_geometry geometry, acres float, primary_id int, {create_id_field_format});
            SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table_unioned', 'wkb_geometry', 3310);
        '''.format(project_schema=options['project_schema'], create_id_field_format=create_id_field_format)

        execute_sql(pSql)

        # Planarize the staged polygons: collect exterior rings per group,
        # polygonize them into non-overlapping faces, dump to single parts,
        # and keep only pieces larger than 5 square meters.  Acreage is
        # computed at 0.000247105 acres per square meter.
        pSql = '''
        insert into {project_schema}.environmental_constraint_geographies_table_unioned (wkb_geometry, acres, primary_id, {insert_id_field_format})
               SELECT
                    st_buffer(wkb_geometry, 0) as wkb_geometry,
                    st_area(st_buffer(wkb_geometry, 0)) * 0.000247105 as acres,
                    primary_id, {insert_id_field_format}

                    FROM (
                        SELECT
                            (ST_Dump(wkb_geometry)).geom as wkb_geometry,
                            primary_id, {insert_id_field_format}

                        FROM (
                            SELECT ST_Polygonize(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}   FROM (
                                SELECT ST_Collect(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}   FROM (
                                    SELECT ST_ExteriorRing(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}
                                        FROM {project_schema}.environmental_constraint_geographies_table) AS lines
                                            group by primary_id, {insert_id_field_format}) AS noded_lines
                                                group by primary_id, {insert_id_field_format}) as polygons
                    ) as final
                WHERE st_area(st_buffer(wkb_geometry, 0)) > 5;'''.format(
            project_schema=options['project_schema'],
            insert_id_field_format=insert_id_field_format
        )

        execute_sql(pSql)

        logger.info('finished unioning env constraints: {time} elapsed'.format(
            time=time.time() - start_time))

        #reproject table back to 4326 for integration with web viewing
        pSql = '''
        SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table_unioned', 'wkb_geometry', 4326);
        update {project_schema}.environmental_constraint_geographies_table_unioned a set wkb_geometry = st_transform(st_buffer(wkb_geometry, 0), 4326);
        '''.format(
            project_schema=options['project_schema']
        )
        execute_sql(pSql)

        add_geom_idx(options['project_schema'], 'environmental_constraint_geographies_table_unioned')

        logger.info('Env Union Finished: %s' % str(time.time() - start_time))
def calculate_distance(distance_options):
    print 'Calculating distance from target features areas'

    ##ST_DISTANCE returns distances in meters from geometries in WGS84 projection if set to false

    thread_count = count_cores()
    queue = queue_process()

    #if the source table query has not results set all values to the max and break
    zero_values_check = report_sql_values(
        '''select
              sum(*)
              from {source_table} where {source_table_query};'''.format(**distance_options), 'fetchone')

    if len(zero_values_check) == 0:
        pSql = '''
        update {target_table_schema}.{target_table} a set
          {column} = {maximum_distance}
          where {target_table_query} and {column} = 0
        '''.format(**distance_options)

        execute_sql(pSql)
        return


    pSql = '''drop function if exists distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      out {column} float) cascade;'''.format(**distance_options)

    execute_sql(pSql)

    pSql = '''
    CREATE OR REPLACE FUNCTION distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      out {column} float)
    AS
    $$
      select
        $1 as id,
        cast(st_distance(st_centroid($2), st_centroid(ref.geometry)) as float) as {column}
        from
            (select *, {source_geometry_column} as geometry from {source_table}) ref
        where ST_DWITHIN($2, ref.geometry, {maximum_distance}) and ({source_table_query})
                order by {column};
    $$
    COST 10000
    language SQL STABLE strict;
    '''.format(**distance_options)

    execute_sql(pSql)

    drop_table('{target_table_schema}.{target_table}_{column}'.format(**distance_options))

    pSql = '''
      create table {target_table_schema}.{target_table}_{column} (id int, {column} float);
    '''.format(**distance_options)

    execute_sql(pSql)

    id_list = flatten(report_sql_values(
        '''select cast({target_table_pk} as int) from {target_table_schema}.{target_table}
            where {target_table_query} order by {target_table_pk}'''.format(
        **distance_options), 'fetchall'))

    insert_sql = '''
    insert into {target_table_schema}.{target_table}_{column}
      select (f).* from (
          select distance_tool(a.id, a.wkb_geometry) as f
          from (select {target_table_pk} as id, {target_geometry_column} as wkb_geometry
                from {target_table_schema}.{target_table}
                where
                    {target_table_pk} >= {bottom_range_id} and
                    {target_table_pk} <= {top_range_id} and
                    ({target_table_query})
                ) a
          ) s;
    '''.format(bottom_range_id="{start_id}", top_range_id="{end_id}", **distance_options)

    for i in range(thread_count):
        t = MultithreadProcess(queue, insert_sql)
        t.setDaemon(True)
        t.start()

    #populate queue with data
    rows_per_thread = len(id_list) / thread_count
    offset = 0

    for i in range(thread_count):
        if i == thread_count - 1:
            ## last bucket gets any remainder, too
            last_thread = len(id_list) - 1
        else:
            last_thread = offset + rows_per_thread - 1

        rows_to_process = {
            'start_id': id_list[offset],
            'end_id': id_list[last_thread]
        }
        offset += rows_per_thread
        queue.put(rows_to_process)

    #wait on the queue until everything has been processed
    queue.join()

    add_attribute_idx(distance_options['target_table_schema'],
                      '{target_table}_{column}'.format(**distance_options), 'id')

    pSql = '''
    update {target_table_schema}.{target_table} a set
      {column} = source_column
        from (select id as source_id, {column} as source_column from {target_table_schema}.{target_table}_{column})  b
            where cast(a.{target_table_pk} as int) = b.source_id and ({target_table_query})
    '''.format(**distance_options)
    execute_sql(pSql)

    pSql = '''
    update {target_table_schema}.{target_table} a set
      {column} = {maximum_distance}
      where {target_table_query} and {column} = 0
    '''.format(**distance_options)

    execute_sql(pSql)

    drop_table('{target_table_schema}.{target_table}_{column}'.format(**distance_options))
def aggregate_within_variable_distance(distance_options):
    """
    Aggregate the fields in ``distance_options['variable_field_list']``
    over all source rows within each source feature's own per-row
    ``distance`` value, writing the results to a scratch table in
    parallel worker threads, then join them back onto the target table by
    centroid intersection.  Targets with no aggregate row are set to 0.
    """

    thread_count = count_cores()
    queue = queue_process()

    # Build the OUT-parameter list and the aggregate select list for the
    # SQL helper function, one entry per requested variable.
    sql_format = 'out {formatter} float'.format(formatter="{0}")
    output_field_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    sql_format = 'cast({aggregation_type}({formatter}) as float) as {formatter}'.format(formatter="{0}", aggregation_type=distance_options['aggregation_type'])
    sql_calculations_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    pSql = '''
    drop function if exists aggregate_within_variable_distance_tool(
      in_id int,
      in_distance float,
      in_geometry geometry,
      out id int,
      out wkb_geometry geometry,
      {output_field_format}) cascade;'''.format(
        output_field_format=output_field_format)

    execute_sql(pSql)

    pSql = '''
    CREATE OR REPLACE FUNCTION aggregate_within_variable_distance_tool(
      in_id int,
      id_distance float,
      in_geometry geometry,
      out id int,
      out wkb_geometry geometry,
      {output_field_format})
    AS
    $$
      select
        $1 as id,
        $3 as wkb_geometry,
        {sql_calculations_format}

    from {source_table} ref
        WHERE st_dwithin($3, ref.wkb_geometry, $2) and (ref.{source_table_query});
    $$
    COST 10000
    language SQL STABLE strict;
    '''.format(source_table=distance_options['source_table'],
               source_table_query=distance_options['source_table_query'],
               output_field_format=output_field_format,
               sql_calculations_format=sql_calculations_format)

    execute_sql(pSql)

    # Scratch table holding the per-feature aggregates from the workers.
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'], suffix=distance_options['suffix']))

    # Fix: dropped a stray, unused "suffix" keyword argument that was
    # previously passed to this format() call.
    sql_format = '{formatter} float'.format(formatter="{0}")
    output_table_field_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    pSql = '''create table {target_table_schema}.{target_table}_{suffix} (id int, wkb_geometry geometry, {output_table_field_format});'''.format(
        target_table_schema=distance_options['target_table_schema'], target_table=distance_options['target_table'],
        suffix=distance_options['suffix'], output_table_field_format=output_table_field_format)

    execute_sql(pSql)

    pSql = 'select cast(id as int) from {source_table} where id is not null order by id'.format(
        source_table=distance_options['source_table'])

    id_list = flatten(report_sql_values(pSql, 'fetchall'))

    insert_sql = '''
    insert into {target_table_schema}.{target_table}_{suffix}
      select (f).* from (
          select aggregate_within_variable_distance_tool(id, distance, wkb_geometry) as f
          from {source_table}
          where id >= {bottom_range_id} and id <= {top_range_id} and {source_table_query}
          ) s
    where (f).id is not null;
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        source_table_query=distance_options['source_table_query'],
        target_table=distance_options['target_table'],
        source_table=distance_options['source_table'],
        suffix=distance_options['suffix'],
        bottom_range_id="{start_id}",
        top_range_id="{end_id}")

    # Fix: an empty id list previously crashed the range partitioning
    # below with an IndexError (consistent with calculate_distance); with
    # no source ids the scratch table stays empty and the final update is
    # a no-op.
    if id_list:
        for i in range(thread_count):
            t = MultithreadProcess(queue, insert_sql)
            t.setDaemon(True)
            t.start()

        #populate queue with data
        rows_per_thread = len(id_list) / thread_count
        offset = 0

        for i in range(thread_count):
            if i == thread_count - 1:
                ## last bucket gets any remainder, too
                last_thread = len(id_list) - 1
            else:
                last_thread = offset + rows_per_thread - 1

            rows_to_process = {
                'start_id': id_list[offset],
                'end_id': id_list[last_thread]
            }
            offset += rows_per_thread
            queue.put(rows_to_process)

        #wait on the queue until everything has been processed
        queue.join()

    add_attribute_idx(distance_options['target_table_schema'],
                      '{target_table}_{suffix}'.format(target_table=distance_options['target_table'],
                                                       suffix=distance_options['suffix']), 'id')

    # Join the aggregates back by centroid intersection; missing matches
    # default each variable to 0 via the CASE in the SET list.
    update_table_field_format = create_sql_calculations(distance_options['variable_field_list'], '{0} = (case when b.{0}_var is null then 0 else b.{0}_var end)', ', ')
    select_format = create_sql_calculations(distance_options['variable_field_list'], '{0} as {0}_var', ', ')

    pSql = '''
    update {target_table_schema}.{target_table} a set {update_table_field_format}
        from (select id as {suffix}_id, wkb_geometry, {select_format} from {target_table_schema}.{target_table}_{suffix}) b
            where st_intersects(st_centroid(a.analysis_geom), b.wkb_geometry) and {target_table_query};
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_query=distance_options['target_table_query'],
        target_table_pk=distance_options['target_table_pk'],
        update_table_field_format=update_table_field_format,
        select_format=select_format,
        suffix=distance_options['suffix']
    )
    execute_sql(pSql)

    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix']))
Пример #40
0
def run_vmt_variable_trip_length_buffers(sql_config_dict):
    """
    Build the <vmt_variables_table>_vmt_variable scratch table, one row per
    trip-length feature, carrying a per-feature buffer ``distance`` derived
    from ``attractions_hbw`` (x 1609.0 — presumably miles to meters; confirm
    units against the trip-length source data) plus canvas/census sums, then
    aggregate those variables within each feature's variable-distance buffer
    back onto the vmt_variables table.

    :param sql_config_dict: dict of schema/table names (vmt_variables_*,
        uf_canvas_*, census_rates_*, trip_lengths_*) used to format raw SQL.
    """

    variables_schema = sql_config_dict['vmt_variables_schema']
    variables_table = sql_config_dict['vmt_variables_table']
    scratch_table = variables_table + '_vmt_variable'

    # Start from a clean slate — remove any scratch table left by a prior run.
    drop_table('{0}.{1}'.format(variables_schema, scratch_table))

    # One row per trip-length feature (alias a), joined spatially to canvas
    # centroids (inner alias a) enriched with census rates (alias c); the
    # enriched rows (alias b) are summed per trip-length feature.
    create_sql = '''
    create table {vmt_variables_schema}.{vmt_variables_table}_vmt_variable
    as select
      a.id, st_transform(a.wkb_geometry, 3310) as wkb_geometry, cast(a.attractions_hbw * 1609.0 as float) as distance,
      sum(acres_parcel_res) as acres_parcel_res_vb,
      sum(acres_parcel_emp) as acres_parcel_emp_vb,
      sum(acres_parcel_mixed_use) as acres_parcel_mixed_use_vb,
      sum(pop) as pop_vb,
      sum(hh) as hh_vb,
      sum(du) as du_vb,
      sum(du_mf) as du_mf_vb,
      sum(emp) as emp_vb,
      sum(emp_ret) as emp_ret_vb,
      sum(hh_inc_00_10) as hh_inc_00_10_vb,
      sum(hh_inc_10_20) as hh_inc_10_20_vb,
      sum(hh_inc_20_30) as hh_inc_20_30_vb,
      sum(hh_inc_30_40) as hh_inc_30_40_vb,
      sum(hh_inc_40_50) as hh_inc_40_50_vb,
      sum(hh_inc_50_60) as hh_inc_50_60_vb,
      sum(hh_inc_60_75) as hh_inc_60_75_vb,
      sum(hh_inc_75_100) as hh_inc_75_100_vb,
      sum(hh_inc_100p) as hh_inc_100p_vb,
      sum(pop_employed) as pop_employed_vb,
      sum(pop_age16_up) as pop_age16_up_vb,
      sum(pop_age65_up) as pop_age65_up_vb

    from
      (select id, wkb_geometry, attractions_hbw from {trip_lengths_schema}.{trip_lengths_table}) a,
      (select point, acres_parcel_res, acres_parcel_emp, acres_parcel_mixed_use, pop, hh, du, du_mf, emp, emp_ret,
            hh * hh_inc_00_10_rate as hh_inc_00_10,
            hh * hh_inc_10_20_rate as hh_inc_10_20,
            hh * hh_inc_20_30_rate as hh_inc_20_30,
            hh * hh_inc_30_40_rate as hh_inc_30_40,
            hh * hh_inc_40_50_rate as hh_inc_40_50,
            hh * hh_inc_50_60_rate as hh_inc_50_60,
            hh * hh_inc_60_75_rate as hh_inc_60_75,
            hh * hh_inc_75_100_rate as hh_inc_75_100,
            hh * hh_inc_100p_rate as hh_inc_100p,
            pop * pop_age16_up_rate * pop_employed_rate as pop_employed,
            pop * pop_age16_up_rate as pop_age16_up,
            pop * pop_age65_up_rate as pop_age65_up

        from (select st_centroid(wkb_geometry) as point, pop, hh, du, du_mf, emp, emp_ret, acres_parcel_res,
        acres_parcel_emp, acres_parcel_mixed_use
        from {uf_canvas_schema}.{uf_canvas_table}) a,
             (select wkb_geometry, hh_inc_00_10_rate, hh_inc_10_20_rate, hh_inc_20_30_rate,
              hh_inc_30_40_rate, hh_inc_40_50_rate, hh_inc_50_60_rate, hh_inc_60_75_rate,
              hh_inc_75_100_rate, hh_inc_100_125_rate + hh_inc_125_150_rate + hh_inc_150_200_rate + hh_inc_200p_rate as hh_inc_100p_rate,
              pop_employed_rate, pop_age16_up_rate, pop_age65_up_rate from {census_rates_schema}.{census_rates_table}) c
          where st_intersects(point, c.wkb_geometry)
          ) b
    where st_intersects(point, a.wkb_geometry) group by a.id, a.wkb_geometry, a.attractions_hbw;
    '''.format(**sql_config_dict)

    execute_sql(create_sql)

    # Spatial index + primary key so the variable-distance aggregation below
    # can join the scratch table efficiently.
    add_geom_idx(variables_schema, scratch_table)
    add_primary_key(variables_schema, scratch_table, 'id')

    # Sum each *_vb variable within every target feature's per-row buffer
    # distance, restricted to scratch rows with any dwelling units or jobs.
    aggregate_within_variable_distance(dict(
        source_table='{0}.{1}'.format(variables_schema, scratch_table),
        source_table_query='du_vb + emp_vb > 0',
        target_table_schema=variables_schema,
        target_table=variables_table,
        target_table_query='id is not null',
        target_table_pk='id',
        suffix='vmt_vb',
        aggregation_type='sum',
        variable_field_list=['acres_parcel_res_vb', 'acres_parcel_emp_vb', 'acres_parcel_mixed_use_vb', 'du_vb', 'pop_vb',
                             'emp_vb', 'emp_ret_vb', 'hh_vb', 'du_mf_vb', 'hh_inc_00_10_vb', 'hh_inc_10_20_vb',
                             'hh_inc_20_30_vb', 'hh_inc_30_40_vb', 'hh_inc_40_50_vb', 'hh_inc_50_60_vb',
                             'hh_inc_60_75_vb', 'hh_inc_75_100_vb', 'hh_inc_100p_vb', 'pop_employed_vb',
                             'pop_age16_up_vb', 'pop_age65_up_vb']
    ))

    # Remove the analysis_geom helper column/index (added during aggregation,
    # presumably — confirm against aggregate_within_variable_distance).
    cleanup_sql = '''DROP INDEX {schema}.{schema}_{table}_analysis_geom;
    Alter Table {schema}.{table} drop column analysis_geom;'''.format(schema=variables_schema,
                                                                   table=variables_table)
    execute_sql(cleanup_sql)
Пример #41
0
    def update(self, **kwargs):
        """
        Run the VMT model for this config_entity.

        Recreates the VMT result table, then walks every canvas feature with
        dwellings or jobs in id-range batches (capped at self.JOB_SIZE rows),
        joins each feature to its VMT-variables, census-rates and trip-length
        rows, and pushes the assembled inputs through the trip-generation /
        trip-purpose / log-odds / final-VMT pipeline.

        :param kwargs: forwarded to progress reporting; ``postprocess_only``
            skips the SQL preprocessing step.

        NOTE(review): Python 2 code (``print`` statement, ``except X, e``).
        """

        # Make sure all related models have been created before querying
        logger.info("Executing Vmt using {0}".format(self.config_entity))

        self.vmt_progress(0.1, **kwargs)

        # Resolve the concrete Feature classes registered on this config_entity.
        vmt_result_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.VMT)
        vmt_variables_feature_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.VMT_VARIABLES)
        census_rates_feature_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.CENSUS_RATES)

        # Future scenarios read the end-state canvas and the future trip
        # lengths / transit stops; otherwise the base equivalents are used.
        if isinstance(self.config_entity.subclassed, FutureScenario):
            scenario_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.END_STATE)
            trip_lengths_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.VMT_FUTURE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.FUTURE_TRANSIT_STOPS)
            is_future = True
        else:
            scenario_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.BASE_CANVAS)
            trip_lengths_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.VMT_BASE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.BASE_TRANSIT_STOPS)
            is_future = False

        # Schema/table names consumed by the raw-SQL steps below.
        sql_config_dict = dict(
            vmt_result_table=vmt_result_class.db_entity_key,
            vmt_schema=parse_schema_and_table(
                vmt_result_class._meta.db_table)[0],
            uf_canvas_table=scenario_class.db_entity_key,
            uf_canvas_schema=parse_schema_and_table(
                scenario_class._meta.db_table)[0],
            census_rates_table=census_rates_feature_class.db_entity_key,
            census_rates_schema=parse_schema_and_table(
                census_rates_feature_class._meta.db_table)[0],
            trip_lengths_table=trip_lengths_class.db_entity_key,
            trip_lengths_schema=parse_schema_and_table(
                trip_lengths_class._meta.db_table)[0],
            vmt_variables_table=vmt_variables_feature_class.db_entity_key,
            vmt_variables_schema=parse_schema_and_table(
                vmt_variables_feature_class._meta.db_table)[0],
            vmt_rel_table=parse_schema_and_table(
                vmt_result_class._meta.db_table)[1],
            vmt_rel_column=vmt_result_class._meta.parents.values()[0].column,
            transit_stop_schema=parse_schema_and_table(
                transit_stop_class._meta.db_table)[0],
            transit_stop_table=transit_stop_class.db_entity_key,
            config_entity=self.config_entity)
        # Heavy SQL preprocessing can be skipped when only post-processing
        # is requested.
        if not kwargs.get('postprocess_only'):
            self.run_vmt_preprocesses(sql_config_dict, **kwargs)

        # Recreate the result table from scratch; the rel (association)
        # table is only truncated.
        drop_table('{vmt_schema}.{vmt_result_table}'.format(**sql_config_dict))
        truncate_table(
            '{vmt_schema}.{vmt_rel_table}'.format(**sql_config_dict))

        # Every output column except 'id' is created as numeric(14, 4).
        attribute_list = filter(lambda x: x != 'id', vmt_output_field_list)
        output_field_syntax = 'id int, ' + create_sql_calculations(
            attribute_list, '{0} numeric(14, 4)')

        pSql = '''
        create table {vmt_schema}.{vmt_result_table} ({output_field_syntax});'''.format(
            output_field_syntax=output_field_syntax, **sql_config_dict)
        execute_sql(pSql)

        trip_lengths = DbEntityKey.VMT_FUTURE_TRIP_LENGTHS if is_future else DbEntityKey.VMT_BASE_TRIP_LENGTHS
        # Region-wide employment total is fed into every feature's inputs.
        total_employment = scenario_class.objects.aggregate(Sum('emp'))
        # Only features with any dwelling units or employment are processed.
        all_features = scenario_class.objects.filter(
            Q(du__gt=0) | Q(emp__gt=0))
        all_features_length = len(all_features)

        max_id = scenario_class.objects.all().order_by("-id")[0].id
        min_id = scenario_class.objects.all().order_by("id")[0].id

        # This section of the model passes data from POSTGRES into Python and is saved in memory before being committed
        # back to the database. In order to not use all memory with large datasets, jobs are broken up with a maximum
        # job size of JOB_SIZE rows before being committed to the database. It will iterate through until all rows are
        # calculated and committed.
        # NOTE: Python 2 integer division — job_count floors; the last job
        # absorbs the remainder by running through max_id.
        if all_features_length > self.JOB_SIZE:
            job_count = all_features_length / self.JOB_SIZE
            rows_per_range = (max_id - min_id) / job_count
        else:
            rows_per_range = max_id - min_id
            job_count = 1
        print 'Job Count: {0}'.format(job_count)
        start_id = min_id

        for i in range(job_count):
            if i == job_count - 1:
                end_id = max_id
            else:
                end_id = start_id + rows_per_range - 1
            logger.info('Job: {0}'.format(i))
            logger.info('Start Id: {0}'.format(start_id))
            logger.info('End Id: {0}'.format(end_id))

            vmt_output_list = []

            features = all_features.filter(id__range=(start_id, end_id))
            # Annotate each feature with the pks of its related rows in the
            # listed DbEntity tables so they can be fetched below.
            annotated_features = annotated_related_feature_class_pk_via_geographies(
                features, self.config_entity, [
                    DbEntityKey.VMT_VARIABLES, DbEntityKey.CENSUS_RATES,
                    DbEntityKey.VMT_FUTURE_TRIP_LENGTHS,
                    DbEntityKey.VMT_BASE_TRIP_LENGTHS, trip_lengths
                ])

            assert annotated_features.exists(
            ), "VMT is about to process 0 results"

            failed_features = []

            # Per-feature joins: features with missing trip-length or
            # census-rate rows are logged and skipped.
            for feature in annotated_features:
                trip_length_id = feature.vmt_future_trip_lengths if is_future else feature.vmt_base_trip_lengths
                try:
                    trip_lengths_feature = trip_lengths_class.objects.get(
                        id=trip_length_id)
                except trip_lengths_class.DoesNotExist, e:
                    failed_features.append(feature)
                    logger.error(
                        'Cannot find trip lengths for geography with id = {0}'.
                        format(feature.id))
                    continue

                vmt_variables_feature = vmt_variables_feature_class.objects.get(
                    id=feature.vmt_variables)

                try:
                    census_rates_feature = census_rates_feature_class.objects.get(
                        id=feature.census_rates)
                except census_rates_feature_class.DoesNotExist, e:
                    logger.error(
                        'Cannot find census rate with id = {0}'.format(
                            feature.census_rates))
                    continue

                # Assemble the model-input dict. ``float(x) or 0`` collapses a
                # zero value to int 0; it does NOT guard against None
                # (float(None) raises TypeError).
                vmt_feature = dict(
                    id=int(feature.id),
                    acres_gross=float(feature.acres_gross) or 0,
                    acres_parcel=float(feature.acres_parcel) or 0,
                    acres_parcel_res=float(feature.acres_parcel_res) or 0,
                    acres_parcel_emp=float(feature.acres_parcel_emp) or 0,
                    acres_parcel_mixed=float(feature.acres_parcel_mixed_use)
                    or 0,
                    intersections_qtrmi=float(
                        feature.intersection_density_sqmi) or 0,
                    du=float(feature.du) or 0,
                    du_occupancy_rate=float(feature.hh /
                                            feature.du if feature.du else 0),
                    du_detsf=float(feature.du_detsf) or 0,
                    du_attsf=float(feature.du_attsf) or 0,
                    du_mf=float(feature.du_mf) or 0,
                    du_mf2to4=float(feature.du_mf2to4) or 0,
                    du_mf5p=float(feature.du_mf5p) or 0,
                    hh=float(feature.hh) or 0,
                    hh_avg_size=float(feature.pop /
                                      feature.hh if feature.hh > 0 else 0),
                    hh_avg_inc=float(census_rates_feature.hh_agg_inc_rate)
                    or 0,
                    hh_inc_00_10=float(
                        feature.hh * census_rates_feature.hh_inc_00_10_rate)
                    or 0,
                    hh_inc_10_20=float(
                        feature.hh * census_rates_feature.hh_inc_10_20_rate)
                    or 0,
                    hh_inc_20_30=float(
                        feature.hh * census_rates_feature.hh_inc_20_30_rate)
                    or 0,
                    hh_inc_30_40=float(
                        feature.hh * census_rates_feature.hh_inc_30_40_rate)
                    or 0,
                    hh_inc_40_50=float(
                        feature.hh * census_rates_feature.hh_inc_40_50_rate)
                    or 0,
                    hh_inc_50_60=float(
                        feature.hh * census_rates_feature.hh_inc_50_60_rate)
                    or 0,
                    hh_inc_60_75=float(
                        feature.hh * census_rates_feature.hh_inc_60_75_rate)
                    or 0,
                    hh_inc_75_100=float(
                        feature.hh * census_rates_feature.hh_inc_75_100_rate)
                    or 0,
                    hh_inc_100p=float(
                        feature.hh *
                        (census_rates_feature.hh_inc_100_125_rate +
                         census_rates_feature.hh_inc_125_150_rate +
                         census_rates_feature.hh_inc_150_200_rate +
                         census_rates_feature.hh_inc_200p_rate)) or 0,
                    pop=float(feature.pop) or 0,
                    pop_employed=float(
                        feature.pop * census_rates_feature.pop_age16_up_rate *
                        census_rates_feature.pop_employed_rate) or 0,
                    pop_age16_up=float(
                        feature.pop * census_rates_feature.pop_age16_up_rate)
                    or 0,
                    pop_age65_up=float(
                        feature.pop * census_rates_feature.pop_age65_up_rate)
                    or 0,
                    emp=float(feature.emp) or 0,
                    emp_retail=float(feature.emp_retail_services +
                                     feature.emp_other_services) or 0,
                    emp_restaccom=float(feature.emp_accommodation +
                                        feature.emp_restaurant) or 0,
                    emp_arts_entertainment=float(
                        feature.emp_arts_entertainment) or 0,
                    emp_office=float(feature.emp_off) or 0,
                    emp_public=float(feature.emp_public_admin +
                                     feature.emp_education) or 0,
                    emp_industry=float(feature.emp_ind + feature.emp_ag) or 0,
                    emp_within_1mile=float(vmt_variables_feature.emp_1mile)
                    or 0,
                    hh_within_quarter_mile_trans=1
                    if vmt_variables_feature.transit_1km > 0 else 0,
                    vb_acres_parcel_res_total=float(
                        vmt_variables_feature.acres_parcel_res_vb) or 0,
                    vb_acres_parcel_emp_total=float(
                        vmt_variables_feature.acres_parcel_emp_vb) or 0,
                    vb_acres_parcel_mixed_total=float(
                        vmt_variables_feature.acres_parcel_mixed_use_vb) or 0,
                    vb_du_total=float(vmt_variables_feature.du_vb) or 0,
                    vb_pop_total=float(vmt_variables_feature.pop_vb) or 0,
                    vb_emp_total=float(vmt_variables_feature.emp_vb) or 0,
                    vb_emp_retail_total=float(vmt_variables_feature.emp_ret_vb)
                    or 0,
                    vb_hh_total=float(vmt_variables_feature.hh_vb) or 0,
                    vb_du_mf_total=float(vmt_variables_feature.du_mf_vb) or 0,
                    vb_hh_inc_00_10_total=float(
                        vmt_variables_feature.hh_inc_00_10_vb) or 0,
                    vb_hh_inc_10_20_total=float(
                        vmt_variables_feature.hh_inc_10_20_vb) or 0,
                    vb_hh_inc_20_30_total=float(
                        vmt_variables_feature.hh_inc_20_30_vb) or 0,
                    vb_hh_inc_30_40_total=float(
                        vmt_variables_feature.hh_inc_30_40_vb) or 0,
                    vb_hh_inc_40_50_total=float(
                        vmt_variables_feature.hh_inc_40_50_vb) or 0,
                    vb_hh_inc_50_60_total=float(
                        vmt_variables_feature.hh_inc_50_60_vb) or 0,
                    vb_hh_inc_60_75_total=float(
                        vmt_variables_feature.hh_inc_60_75_vb) or 0,
                    vb_hh_inc_75_100_total=float(
                        vmt_variables_feature.hh_inc_75_100_vb) or 0,
                    vb_hh_inc_100p_total=float(
                        vmt_variables_feature.hh_inc_100p_vb) or 0,
                    vb_pop_employed_total=float(
                        vmt_variables_feature.pop_employed_vb) or 0,
                    vb_pop_age16_up_total=float(
                        vmt_variables_feature.pop_age16_up_vb) or 0,
                    vb_pop_age65_up_total=float(
                        vmt_variables_feature.pop_age65_up_vb) or 0,
                    emp30m_transit=float(
                        trip_lengths_feature.emp_30min_transit) or 0,
                    emp45m_transit=float(
                        trip_lengths_feature.emp_45min_transit) or 0,
                    prod_hbw=float(trip_lengths_feature.productions_hbw) or 0,
                    prod_hbo=float(trip_lengths_feature.productions_hbo) or 0,
                    prod_nhb=float(trip_lengths_feature.productions_nhb) or 0,
                    attr_hbw=float(trip_lengths_feature.attractions_hbw) or 0,
                    attr_hbo=float(trip_lengths_feature.attractions_hbo) or 0,
                    attr_nhb=float(trip_lengths_feature.attractions_nhb) or 0,
                    qmb_acres_parcel_res_total=float(
                        vmt_variables_feature.acres_parcel_res_qtrmi) or 0,
                    qmb_acres_parcel_emp_total=float(
                        vmt_variables_feature.acres_parcel_emp_qtrmi) or 0,
                    qmb_acres_parcel_mixed_total=float(
                        vmt_variables_feature.acres_parcel_mixed_use_qtrmi)
                    or 0,
                    qmb_du_total=float(vmt_variables_feature.du_qtrmi) or 0,
                    qmb_pop_total=float(vmt_variables_feature.pop_qtrmi) or 0,
                    qmb_emp_total=float(vmt_variables_feature.emp_qtrmi) or 0,
                    qmb_emp_retail=float(vmt_variables_feature.emp_ret_qtrmi)
                    or 0,
                    hh_avg_veh=float(census_rates_feature.hh_agg_veh_rate)
                    or 0,
                    truck_adjustment_factor=0.031,
                    total_employment=float(total_employment['emp__sum']) or 0)

                # run raw trip generation
                vmt_feature_trips = generate_raw_trips(vmt_feature)

                # run trip purpose splits
                vmt_feature_trip_purposes = calculate_trip_purpose_splits(
                    vmt_feature_trips)

                # run log odds
                vmt_feature_log_odds = calculate_log_odds(
                    vmt_feature_trip_purposes)

                # run vmt equations
                vmt_output = calculate_final_vmt_results(vmt_feature_log_odds)

                # filters the vmt feature dictionary for specific output fields for writing to the database
                output_list = map(lambda key: vmt_output[key],
                                  vmt_output_field_list)
                vmt_output_list.append(output_list)
                # NOTE(review): vmt_output_list is accumulated here but no
                # database write-back appears within this excerpt — confirm
                # the rows are persisted downstream.
Пример #42
0
def run_vmt_variable_trip_length_buffers(sql_config_dict):
    """
    Build the <vmt_variables_table>_vmt_variable scratch table — one row per
    trip-length feature with a per-row buffer ``distance`` derived from
    ``attractions_hbw`` (x 1609.0 — presumably miles to meters; confirm the
    source units) plus canvas/census sums — then aggregate those variables
    within each feature's variable-distance buffer onto the vmt_variables
    table.

    :param sql_config_dict: dict of schema/table names (vmt_variables_*,
        uf_canvas_*, census_rates_*, trip_lengths_*) used to format raw SQL.
    """

    # Start clean: drop any scratch table left by a previous run.
    drop_table('{vmt_variables_schema}.{vmt_variables_table}_vmt_variable'.format(
        vmt_variables_schema=sql_config_dict['vmt_variables_schema'],
        vmt_variables_table=sql_config_dict['vmt_variables_table']))

    # Trip-length features (alias a) are spatially joined to canvas centroids
    # enriched with census rates (alias b, built from inner aliases a and c);
    # the enriched rows are summed per trip-length feature.
    pSql = '''
    create table {vmt_variables_schema}.{vmt_variables_table}_vmt_variable
    as select
      a.id, st_transform(a.wkb_geometry, 3310) as wkb_geometry, cast(a.attractions_hbw * 1609.0 as float) as distance,
      sum(acres_parcel_res) as acres_parcel_res_vb,
      sum(acres_parcel_emp) as acres_parcel_emp_vb,
      sum(acres_parcel_mixed_use) as acres_parcel_mixed_use_vb,
      sum(pop) as pop_vb,
      sum(hh) as hh_vb,
      sum(du) as du_vb,
      sum(du_mf) as du_mf_vb,
      sum(emp) as emp_vb,
      sum(emp_ret) as emp_ret_vb,
      sum(hh_inc_00_10) as hh_inc_00_10_vb,
      sum(hh_inc_10_20) as hh_inc_10_20_vb,
      sum(hh_inc_20_30) as hh_inc_20_30_vb,
      sum(hh_inc_30_40) as hh_inc_30_40_vb,
      sum(hh_inc_40_50) as hh_inc_40_50_vb,
      sum(hh_inc_50_60) as hh_inc_50_60_vb,
      sum(hh_inc_60_75) as hh_inc_60_75_vb,
      sum(hh_inc_75_100) as hh_inc_75_100_vb,
      sum(hh_inc_100p) as hh_inc_100p_vb,
      sum(pop_employed) as pop_employed_vb,
      sum(pop_age16_up) as pop_age16_up_vb,
      sum(pop_age65_up) as pop_age65_up_vb

    from
      (select id, wkb_geometry, attractions_hbw from {trip_lengths_schema}.{trip_lengths_table}) a,
      (select point, acres_parcel_res, acres_parcel_emp, acres_parcel_mixed_use, pop, hh, du, du_mf, emp, emp_ret,
            hh * hh_inc_00_10_rate as hh_inc_00_10,
            hh * hh_inc_10_20_rate as hh_inc_10_20,
            hh * hh_inc_20_30_rate as hh_inc_20_30,
            hh * hh_inc_30_40_rate as hh_inc_30_40,
            hh * hh_inc_40_50_rate as hh_inc_40_50,
            hh * hh_inc_50_60_rate as hh_inc_50_60,
            hh * hh_inc_60_75_rate as hh_inc_60_75,
            hh * hh_inc_75_100_rate as hh_inc_75_100,
            hh * hh_inc_100p_rate as hh_inc_100p,
            pop * pop_age16_up_rate * pop_employed_rate as pop_employed,
            pop * pop_age16_up_rate as pop_age16_up,
            pop * pop_age65_up_rate as pop_age65_up

        from (select st_centroid(wkb_geometry) as point, pop, hh, du, du_mf, emp, emp_ret, acres_parcel_res,
        acres_parcel_emp, acres_parcel_mixed_use
        from {uf_canvas_schema}.{uf_canvas_table}) a,
             (select wkb_geometry, hh_inc_00_10_rate, hh_inc_10_20_rate, hh_inc_20_30_rate,
              hh_inc_30_40_rate, hh_inc_40_50_rate, hh_inc_50_60_rate, hh_inc_60_75_rate,
              hh_inc_75_100_rate, hh_inc_100_125_rate + hh_inc_125_150_rate + hh_inc_150_200_rate + hh_inc_200p_rate as hh_inc_100p_rate,
              pop_employed_rate, pop_age16_up_rate, pop_age65_up_rate from {census_rates_schema}.{census_rates_table}) c
          where st_intersects(point, c.wkb_geometry)
          ) b
    where st_intersects(point, a.wkb_geometry) group by a.id, a.wkb_geometry, a.attractions_hbw;
    '''.format(vmt_variables_schema=sql_config_dict['vmt_variables_schema'],
               vmt_variables_table=sql_config_dict['vmt_variables_table'],
               uf_canvas_schema=sql_config_dict['uf_canvas_schema'],
               uf_canvas_table=sql_config_dict['uf_canvas_table'],
               census_rates_schema=sql_config_dict['census_rates_schema'],
               census_rates_table=sql_config_dict['census_rates_table'],
               trip_lengths_schema=sql_config_dict['trip_lengths_schema'],
               trip_lengths_table=sql_config_dict['trip_lengths_table'])

    execute_sql(pSql)

    # Spatial index + primary key so the variable-distance aggregation below
    # can join the scratch table efficiently.
    add_geom_idx(sql_config_dict['vmt_variables_schema'], sql_config_dict['vmt_variables_table'] + '_vmt_variable')
    add_primary_key(sql_config_dict['vmt_variables_schema'], sql_config_dict['vmt_variables_table'] + '_vmt_variable', 'id')

    # Sum each *_vb variable within every target feature's per-row buffer
    # distance, restricted to scratch rows with any dwelling units or jobs.
    aggregate_within_variable_distance(dict(
        source_table=sql_config_dict['vmt_variables_schema'] + '.' + sql_config_dict['vmt_variables_table'] + '_vmt_variable',
        source_table_query='du_vb + emp_vb > 0',
        target_table_schema=sql_config_dict['vmt_variables_schema'],
        target_table=sql_config_dict['vmt_variables_table'],
        target_table_query='id is not null',
        target_table_pk='id',
        suffix='vmt_vb',
        aggregation_type='sum',
        variable_field_list=['acres_parcel_res_vb', 'acres_parcel_emp_vb', 'acres_parcel_mixed_use_vb', 'du_vb', 'pop_vb',
                             'emp_vb', 'emp_ret_vb', 'hh_vb', 'du_mf_vb', 'hh_inc_00_10_vb', 'hh_inc_10_20_vb',
                             'hh_inc_20_30_vb', 'hh_inc_30_40_vb', 'hh_inc_40_50_vb', 'hh_inc_50_60_vb',
                             'hh_inc_60_75_vb', 'hh_inc_75_100_vb', 'hh_inc_100p_vb', 'pop_employed_vb',
                             'pop_age16_up_vb', 'pop_age65_up_vb']
    ))

    # Remove the analysis_geom helper column/index (added during aggregation,
    # presumably — confirm against aggregate_within_variable_distance).
    pSql = '''DROP INDEX {schema}.{schema}_{table}_analysis_geom;
    Alter Table {schema}.{table} drop column analysis_geom;'''.format(schema=sql_config_dict['vmt_variables_schema'],
                                                                   table=sql_config_dict['vmt_variables_table'])
    execute_sql(pSql)
Пример #43
0
    def update(self, **kwargs):

        # Make sure all related models have been created before querying
        logger.info("Executing Vmt using {0}".format(self.config_entity))

        self.vmt_progress(0.1, **kwargs)

        vmt_result_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT)
        vmt_variables_feature_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_VARIABLES)
        census_rates_feature_class = self.config_entity.db_entity_feature_class(DbEntityKey.CENSUS_RATES)

        if isinstance(self.config_entity.subclassed, FutureScenario):
            scenario_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
            trip_lengths_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_FUTURE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(DbEntityKey.FUTURE_TRANSIT_STOPS)
            is_future = True
        else:
            scenario_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
            trip_lengths_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_BASE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_TRANSIT_STOPS)
            is_future = False

        sql_config_dict = dict(
            vmt_result_table=vmt_result_class.db_entity_key,
            vmt_schema=parse_schema_and_table(vmt_result_class._meta.db_table)[0],
            uf_canvas_table=scenario_class.db_entity_key,
            uf_canvas_schema=parse_schema_and_table(scenario_class._meta.db_table)[0],
            census_rates_table=census_rates_feature_class.db_entity_key,
            census_rates_schema=parse_schema_and_table(census_rates_feature_class._meta.db_table)[0],
            trip_lengths_table=trip_lengths_class.db_entity_key,
            trip_lengths_schema=parse_schema_and_table(trip_lengths_class._meta.db_table)[0],
            vmt_variables_table=vmt_variables_feature_class.db_entity_key,
            vmt_variables_schema=parse_schema_and_table(vmt_variables_feature_class._meta.db_table)[0],
            vmt_rel_table=parse_schema_and_table(vmt_result_class._meta.db_table)[1],
            vmt_rel_column=vmt_result_class._meta.parents.values()[0].column,
            transit_stop_schema=parse_schema_and_table(transit_stop_class._meta.db_table)[0],
            transit_stop_table=transit_stop_class.db_entity_key,
            config_entity=self.config_entity
        )
        #
        if not kwargs.get('postprocess_only'):
            self.run_vmt_preprocesses(sql_config_dict, **kwargs)

        drop_table('{vmt_schema}.{vmt_result_table}'.format(**sql_config_dict))
        truncate_table('{vmt_schema}.{vmt_rel_table}'.format(**sql_config_dict))

        attribute_list = filter(lambda x: x != 'id', vmt_output_field_list)
        output_field_syntax = 'id int, ' + create_sql_calculations(attribute_list, '{0} numeric(14, 4)')

        pSql = '''
        create table {vmt_schema}.{vmt_result_table} ({output_field_syntax});'''.format(
            output_field_syntax=output_field_syntax, **sql_config_dict)
        execute_sql(pSql)

        trip_lengths = DbEntityKey.VMT_FUTURE_TRIP_LENGTHS if is_future else DbEntityKey.VMT_BASE_TRIP_LENGTHS
        total_employment = scenario_class.objects.aggregate(Sum('emp'))
        all_features = scenario_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
        all_features_length = len(all_features)

        max_id = scenario_class.objects.all().order_by("-id")[0].id
        min_id = scenario_class.objects.all().order_by("id")[0].id

         # This section of the model passes data from POSTGRES into Python and is saved in memory before being committed
        # back to the database. In order to not use all memory with large datasets, jobs are broken up with a maximum
        # job size of JOB_SIZE rows before being committed to the database. It will iterate through until all rows are
        # calculated and committed.
        if all_features_length > self.JOB_SIZE:
            job_count = all_features_length / self.JOB_SIZE
            rows_per_range = (max_id - min_id) / job_count
        else:
            rows_per_range = max_id - min_id
            job_count = 1
        print 'Job Count: {0}'.format(job_count)
        start_id = min_id

        for i in range(job_count):
            if i == job_count - 1:
                end_id = max_id
            else:
                end_id = start_id + rows_per_range - 1
            logger.info('Job: {0}'.format(i))
            logger.info('Start Id: {0}'.format(start_id))
            logger.info('End Id: {0}'.format(end_id))

            vmt_output_list = []

            features = all_features.filter(id__range=(start_id, end_id))
            annotated_features = annotated_related_feature_class_pk_via_geographies(features, self.config_entity, [
                DbEntityKey.VMT_VARIABLES, DbEntityKey.CENSUS_RATES, DbEntityKey.VMT_FUTURE_TRIP_LENGTHS, DbEntityKey.VMT_BASE_TRIP_LENGTHS, trip_lengths])

            assert annotated_features.exists(), "VMT is about to process 0 results"

            failed_features = []

            for feature in annotated_features:
                trip_length_id = feature.vmt_future_trip_lengths if is_future else feature.vmt_base_trip_lengths
                try:
                    trip_lengths_feature = trip_lengths_class.objects.get(id=trip_length_id)
                except trip_lengths_class.DoesNotExist, e:
                    failed_features.append(feature)
                    logger.error('Cannot find trip lengths for geography with id = {0}'.format(feature.id))
                    continue

                vmt_variables_feature = vmt_variables_feature_class.objects.get(id=feature.vmt_variables)

                try:
                    census_rates_feature = census_rates_feature_class.objects.get(id=feature.census_rates)
                except census_rates_feature_class.DoesNotExist, e:
                    logger.error('Cannot find census rate with id = {0}'.format(feature.census_rates))
                    continue

                vmt_feature = dict(
                    id=int(feature.id),
                    acres_gross=float(feature.acres_gross) or 0,
                    acres_parcel=float(feature.acres_parcel) or 0,
                    acres_parcel_res=float(feature.acres_parcel_res) or 0,
                    acres_parcel_emp=float(feature.acres_parcel_emp) or 0,
                    acres_parcel_mixed=float(feature.acres_parcel_mixed_use) or 0,
                    intersections_qtrmi=float(feature.intersection_density_sqmi) or 0,
                    du=float(feature.du) or 0,
                    du_occupancy_rate=float(feature.hh / feature.du if feature.du else 0),
                    du_detsf=float(feature.du_detsf) or 0,
                    du_attsf=float(feature.du_attsf) or 0,

                    du_mf=float(feature.du_mf) or 0,
                    du_mf2to4=float(feature.du_mf2to4) or 0,
                    du_mf5p=float(feature.du_mf5p) or 0,
                    hh=float(feature.hh) or 0,
                    hh_avg_size=float(feature.pop / feature.hh if feature.hh > 0 else 0),
                    hh_avg_inc=float(census_rates_feature.hh_agg_inc_rate) or 0,

                    hh_inc_00_10=float(feature.hh * census_rates_feature.hh_inc_00_10_rate) or 0,
                    hh_inc_10_20=float(feature.hh * census_rates_feature.hh_inc_10_20_rate) or 0,
                    hh_inc_20_30=float(feature.hh * census_rates_feature.hh_inc_20_30_rate) or 0,
                    hh_inc_30_40=float(feature.hh * census_rates_feature.hh_inc_30_40_rate) or 0,
                    hh_inc_40_50=float(feature.hh * census_rates_feature.hh_inc_40_50_rate) or 0,
                    hh_inc_50_60=float(feature.hh * census_rates_feature.hh_inc_50_60_rate) or 0,
                    hh_inc_60_75=float(feature.hh * census_rates_feature.hh_inc_60_75_rate) or 0,
                    hh_inc_75_100=float(feature.hh * census_rates_feature.hh_inc_75_100_rate) or 0,
                    hh_inc_100p=float(feature.hh * (census_rates_feature.hh_inc_100_125_rate +
                                                                     census_rates_feature.hh_inc_125_150_rate +
                                                                     census_rates_feature.hh_inc_150_200_rate +
                                                                     census_rates_feature.hh_inc_200p_rate)) or 0,

                    pop=float(feature.pop) or 0,
                    pop_employed=float(feature.pop * census_rates_feature.pop_age16_up_rate *
                                       census_rates_feature.pop_employed_rate) or 0,
                    pop_age16_up=float(feature.pop * census_rates_feature.pop_age16_up_rate) or 0,
                    pop_age65_up=float(feature.pop * census_rates_feature.pop_age65_up_rate) or 0,

                    emp=float(feature.emp) or 0,
                    emp_retail=float(feature.emp_retail_services + feature.emp_other_services) or 0,
                    emp_restaccom=float(feature.emp_accommodation + feature.emp_restaurant) or 0,
                    emp_arts_entertainment=float(feature.emp_arts_entertainment) or 0,
                    emp_office=float(feature.emp_off) or 0,
                    emp_public=float(feature.emp_public_admin + feature.emp_education) or 0,
                    emp_industry=float(feature.emp_ind + feature.emp_ag) or 0,

                    emp_within_1mile=float(vmt_variables_feature.emp_1mile) or 0,
                    hh_within_quarter_mile_trans=1 if vmt_variables_feature.transit_1km > 0 else 0,

                    vb_acres_parcel_res_total=float(vmt_variables_feature.acres_parcel_res_vb) or 0,
                    vb_acres_parcel_emp_total=float(vmt_variables_feature.acres_parcel_emp_vb) or 0,
                    vb_acres_parcel_mixed_total=float(vmt_variables_feature.acres_parcel_mixed_use_vb) or 0,
                    vb_du_total=float(vmt_variables_feature.du_vb) or 0,
                    vb_pop_total=float(vmt_variables_feature.pop_vb) or 0,
                    vb_emp_total=float(vmt_variables_feature.emp_vb) or 0,
                    vb_emp_retail_total=float(vmt_variables_feature.emp_ret_vb) or 0,
                    vb_hh_total=float(vmt_variables_feature.hh_vb) or 0,
                    vb_du_mf_total=float(vmt_variables_feature.du_mf_vb) or 0,
                    vb_hh_inc_00_10_total=float(vmt_variables_feature.hh_inc_00_10_vb) or 0,
                    vb_hh_inc_10_20_total=float(vmt_variables_feature.hh_inc_10_20_vb) or 0,
                    vb_hh_inc_20_30_total=float(vmt_variables_feature.hh_inc_20_30_vb) or 0,
                    vb_hh_inc_30_40_total=float(vmt_variables_feature.hh_inc_30_40_vb) or 0,
                    vb_hh_inc_40_50_total=float(vmt_variables_feature.hh_inc_40_50_vb) or 0,
                    vb_hh_inc_50_60_total=float(vmt_variables_feature.hh_inc_50_60_vb) or 0,
                    vb_hh_inc_60_75_total=float(vmt_variables_feature.hh_inc_60_75_vb) or 0,
                    vb_hh_inc_75_100_total=float(vmt_variables_feature.hh_inc_75_100_vb) or 0,
                    vb_hh_inc_100p_total=float(vmt_variables_feature.hh_inc_100p_vb) or 0,

                    vb_pop_employed_total=float(vmt_variables_feature.pop_employed_vb) or 0,
                    vb_pop_age16_up_total=float(vmt_variables_feature.pop_age16_up_vb) or 0,
                    vb_pop_age65_up_total=float(vmt_variables_feature.pop_age65_up_vb) or 0,

                    emp30m_transit=float(trip_lengths_feature.emp_30min_transit) or 0,
                    emp45m_transit=float(trip_lengths_feature.emp_45min_transit) or 0,
                    prod_hbw=float(trip_lengths_feature.productions_hbw) or 0,
                    prod_hbo=float(trip_lengths_feature.productions_hbo) or 0,
                    prod_nhb=float(trip_lengths_feature.productions_nhb) or 0,
                    attr_hbw=float(trip_lengths_feature.attractions_hbw) or 0,
                    attr_hbo=float(trip_lengths_feature.attractions_hbo) or 0,
                    attr_nhb=float(trip_lengths_feature.attractions_nhb) or 0,

                    qmb_acres_parcel_res_total=float(vmt_variables_feature.acres_parcel_res_qtrmi) or 0,
                    qmb_acres_parcel_emp_total=float(vmt_variables_feature.acres_parcel_emp_qtrmi) or 0,
                    qmb_acres_parcel_mixed_total=float(vmt_variables_feature.acres_parcel_mixed_use_qtrmi) or 0,
                    qmb_du_total=float(vmt_variables_feature.du_qtrmi) or 0,
                    qmb_pop_total=float(vmt_variables_feature.pop_qtrmi) or 0,
                    qmb_emp_total=float(vmt_variables_feature.emp_qtrmi) or 0,
                    qmb_emp_retail=float(vmt_variables_feature.emp_ret_qtrmi) or 0,
                    hh_avg_veh=float(census_rates_feature.hh_agg_veh_rate) or 0,

                    truck_adjustment_factor=0.031,
                    total_employment=float(total_employment['emp__sum']) or 0)

                # run raw trip generation
                vmt_feature_trips = generate_raw_trips(vmt_feature)

                # run trip purpose splits
                vmt_feature_trip_purposes = calculate_trip_purpose_splits(vmt_feature_trips)

                # run log odds
                vmt_feature_log_odds = calculate_log_odds(vmt_feature_trip_purposes)

                # run vmt equations
                vmt_output = calculate_final_vmt_results(vmt_feature_log_odds)

                # filters the vmt feature dictionary for specific output fields for writing to the database
                output_list = map(lambda key: vmt_output[key], vmt_output_field_list)
                vmt_output_list.append(output_list)
def calculate_distance(distance_options):
    print 'Calculating distance from target features areas'

    ##ST_DISTANCE returns distances in meters from geometries in WGS84 projection if set to false

    thread_count = count_cores()
    queue = queue_process()

    #if the source table query has not results set all values to the max and break
    zero_values_check = report_sql_values(
        '''select
              sum(*)
              from {source_table} where {source_table_query};'''.format(**distance_options), 'fetchone')

    if len(zero_values_check) == 0:
        pSql = '''
        update {target_table_schema}.{target_table} a set
          {column} = {maximum_distance}
          where {target_table_query} and {column} = 0
        '''.format(**distance_options)

        execute_sql(pSql)
        return


    pSql = '''drop function if exists distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      out {column} float) cascade;'''.format(**distance_options)

    execute_sql(pSql)

    pSql = '''
    CREATE OR REPLACE FUNCTION distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      out {column} float)
    AS
    $$
      select
        $1 as id,
        cast(st_distance(st_centroid($2), st_centroid(ref.geometry)) as float) as {column}
        from
            (select *, {source_geometry_column} as geometry from {source_table}) ref
        where ST_DWITHIN($2, ref.geometry, {maximum_distance}) and ({source_table_query})
                order by {column};
    $$
    COST 10000
    language SQL STABLE strict;
    '''.format(**distance_options)

    execute_sql(pSql)

    drop_table('{target_table_schema}.{target_table}_{column}'.format(**distance_options))

    pSql = '''
      create table {target_table_schema}.{target_table}_{column} (id int, {column} float);
    '''.format(**distance_options)

    execute_sql(pSql)

    id_list = flatten(report_sql_values(
        '''select cast({target_table_pk} as int) from {target_table_schema}.{target_table}
            where {target_table_query} order by {target_table_pk}'''.format(
        **distance_options), 'fetchall'))

    insert_sql = '''
    insert into {target_table_schema}.{target_table}_{column}
      select (f).* from (
          select distance_tool(a.id, a.wkb_geometry) as f
          from (select {target_table_pk} as id, {target_geometry_column} as wkb_geometry
                from {target_table_schema}.{target_table}
                where
                    {target_table_pk} >= {bottom_range_id} and
                    {target_table_pk} <= {top_range_id} and
                    ({target_table_query})
                ) a
          ) s;
    '''.format(bottom_range_id="{start_id}", top_range_id="{end_id}", **distance_options)

    for i in range(thread_count):
        t = MultithreadProcess(queue, insert_sql)
        t.setDaemon(True)
        t.start()

    #populate queue with data
    rows_per_thread = len(id_list) / thread_count
    offset = 0

    for i in range(thread_count):
        if i == thread_count - 1:
            ## last bucket gets any remainder, too
            last_thread = len(id_list) - 1
        else:
            last_thread = offset + rows_per_thread - 1

        rows_to_process = {
            'start_id': id_list[offset],
            'end_id': id_list[last_thread]
        }
        offset += rows_per_thread
        queue.put(rows_to_process)

    #wait on the queue until everything has been processed
    queue.join()

    add_attribute_idx(distance_options['target_table_schema'],
                      '{target_table}_{column}'.format(**distance_options), 'id')

    pSql = '''
    update {target_table_schema}.{target_table} a set
      {column} = source_column
        from (select id as source_id, {column} as source_column from {target_table_schema}.{target_table}_{column})  b
            where cast(a.{target_table_pk} as int) = b.source_id and ({target_table_query})
    '''.format(**distance_options)
    execute_sql(pSql)

    pSql = '''
    update {target_table_schema}.{target_table} a set
      {column} = {maximum_distance}
      where {target_table_query} and {column} = 0
    '''.format(**distance_options)

    execute_sql(pSql)

    drop_table('{target_table_schema}.{target_table}_{column}'.format(**distance_options))
def aggregate_within_variable_distance(distance_options):
    """
    Aggregates the configured variable fields over all source-table rows
    within each source row's own per-row 'distance' radius, writing the
    results to a scratch table and then joining them back onto the target
    table wherever the target row's analysis-geometry centroid intersects
    the aggregated row's geometry. Work is split across one daemon worker
    thread per core by contiguous id range.

    :param distance_options: dict of SQL identifiers/values used to format
        the queries: source_table, source_table_query, target_table_schema,
        target_table, target_table_query, target_table_pk,
        variable_field_list, aggregation_type, suffix.
    """

    thread_count = count_cores()
    queue = queue_process()

    # "out <field> float" declarations for the SQL function signature
    sql_format = 'out {formatter} float'.format(formatter="{0}")
    output_field_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    # "cast(<agg>(<field>) as float) as <field>" select expressions
    sql_format = 'cast({aggregation_type}({formatter}) as float) as {formatter}'.format(formatter="{0}", aggregation_type=distance_options['aggregation_type'])
    sql_calculations_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    # Drop any stale helper whose signature may have changed (cascade removes
    # dependent objects)
    pSql = '''
    drop function if exists aggregate_within_variable_distance_tool(
      in_id int,
      in_distance float,
      in_geometry geometry,
      out id int,
      out wkb_geometry geometry,
      {output_field_format}) cascade;'''.format(
        output_field_format=output_field_format)

    execute_sql(pSql)

    # Per-row aggregation: echoes the id/geometry and aggregates the variable
    # fields over source rows within $2 of $3.
    # NOTE(review): the 2nd parameter is spelled id_distance here but
    # in_distance in the drop above; the body only references it as $2, so
    # behavior is unaffected — confirm and normalize when convenient.
    pSql = '''
    CREATE OR REPLACE FUNCTION aggregate_within_variable_distance_tool(
      in_id int,
      id_distance float,
      in_geometry geometry,
      out id int,
      out wkb_geometry geometry,
      {output_field_format})
    AS
    $$
      select
        $1 as id,
        $3 as wkb_geometry,
        {sql_calculations_format}

    from {source_table} ref
        WHERE st_dwithin($3, ref.wkb_geometry, $2) and (ref.{source_table_query});
    $$
    COST 10000
    language SQL STABLE strict;
    '''.format(source_table=distance_options['source_table'],
               source_table_query=distance_options['source_table_query'],
               output_field_format=output_field_format,
               sql_calculations_format=sql_calculations_format)

    execute_sql(pSql)

    # Scratch table receiving the per-id aggregation results
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'], suffix=distance_options['suffix']))

    # "<field> float" column declarations for the scratch table (the suffix
    # kwarg is unused by this format string)
    sql_format = '{formatter} float'.format(formatter="{0}", suffix=distance_options['suffix'])
    output_table_field_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    pSql = '''create table {target_table_schema}.{target_table}_{suffix} (id int, wkb_geometry geometry, {output_table_field_format});'''.format(
        target_table_schema=distance_options['target_table_schema'], target_table=distance_options['target_table'],
        suffix=distance_options['suffix'], output_table_field_format=output_table_field_format)

    execute_sql(pSql)

    # Ordered id list used to partition the work into contiguous ranges
    pSql = 'select cast(id as int) from {source_table} where id is not null order by id'.format(
        source_table=distance_options['source_table'])

    id_list = flatten(report_sql_values(pSql, 'fetchall'))

    # {start_id}/{end_id} are left unformatted here; each worker fills them
    # in from the range dict it pulls off the queue.
    insert_sql = '''
    insert into {target_table_schema}.{target_table}_{suffix}
      select (f).* from (
          select aggregate_within_variable_distance_tool(id, distance, wkb_geometry) as f
          from {source_table}
          where id >= {bottom_range_id} and id <= {top_range_id} and {source_table_query}
          ) s
    where (f).id is not null;
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        source_table_query=distance_options['source_table_query'],
        target_table=distance_options['target_table'],
        source_table=distance_options['source_table'],
        suffix=distance_options['suffix'],
        bottom_range_id="{start_id}",
        top_range_id="{end_id}")

    # One daemon worker per core pulling id ranges off the queue
    for i in range(thread_count):
        t = MultithreadProcess(queue, insert_sql)
        t.setDaemon(True)
        t.start()

    #populate queue with data
    rows_per_thread = len(id_list) / thread_count
    offset = 0

    for i in range(thread_count):
        if i == thread_count - 1:
            ## last bucket gets any remainder, too
            last_thread = len(id_list) - 1
        else:
            last_thread = offset + rows_per_thread - 1

        rows_to_process = {
            'start_id': id_list[offset],
            'end_id': id_list[last_thread]
        }
        offset += rows_per_thread
        queue.put(rows_to_process)

    #wait on the queue until everything has been processed
    queue.join()

    add_attribute_idx(distance_options['target_table_schema'],
                      '{target_table}_{suffix}'.format(target_table=distance_options['target_table'],
                                                       suffix=distance_options['suffix']), 'id')

    # "<field> = <field>_var or 0" assignment list and the matching
    # "<field> as <field>_var" select list for the join-back update
    update_table_field_format = create_sql_calculations(distance_options['variable_field_list'], '{0} = (case when b.{0}_var is null then 0 else b.{0}_var end)', ', ')
    select_format = create_sql_calculations(distance_options['variable_field_list'], '{0} as {0}_var', ', ')

    # Join the aggregated values back onto the target table by centroid
    # intersection; null matches become 0 via the case expressions
    pSql = '''
    update {target_table_schema}.{target_table} a set {update_table_field_format}
        from (select id as {suffix}_id, wkb_geometry, {select_format} from {target_table_schema}.{target_table}_{suffix}) b
            where st_intersects(st_centroid(a.analysis_geom), b.wkb_geometry) and {target_table_query};
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_query=distance_options['target_table_query'],
        target_table_pk=distance_options['target_table_pk'],
        update_table_field_format=update_table_field_format,
        select_format=select_format,
        suffix=distance_options['suffix']
    )
    execute_sql(pSql)

    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix']))
# Example #46 (scraped sample separator; original artifact read "Пример #46" / "0")
def populate_transformations_and_indices(sql_config_dict):
    """
    Transforms the raw public-health built-environment variables in place
    (rows with pop > 0 only) and then computes the composite walkability
    index columns used by the public-health models.

    Runs as sequential UPDATE statements because later stages read columns
    written by earlier ones:
      1. non-linear transforms of the raw variables ('^' is the PostgreSQL
         exponent operator); zero b1..b5 values are floored at 1E-24 so the
         entropy logarithms in stage 3 are defined
      2. du_1km_tr transform (deferred because stage 1 reads it raw) and
         a = b1 + .. + b5, the entropy denominator
      3. 'mix5' 5-way land-use mix entropy
      4. preliminary index variables, the overall walk index, and the
         per-model walk index variants — all z-score style standardizations
         with hard-coded means/standard deviations

    :param sql_config_dict: dict providing public_health_variables_schema and
        public_health_variables_table for query formatting.
    """

    ##TRANSFORMING PRELIMINARY AND MODEL VARIABLES STEPS 1 AND 2
    # Stage 1: raw-variable transforms. freeway_arterial_any turns a distance
    # into a within-500 indicator (units presumably meters — TODO confirm);
    # rail_any collapses to a 0/1 flag.
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
        resmix_dens = (case
          when resmix_dens = 0
            then 0
          else ln((du_1km_tr / resmix_dens) + 1) end),
        bldg_sqft_ret = bldg_sqft_ret ^ 0.25,
        b1 = (case when b1 = 0 then 1E-24 else b1 end),
        b2 = (case when b2 = 0 then 1E-24 else b2 end),
        b3 = (case when b3 = 0 then 1E-24 else b3 end),
        b4 = (case when b4 = 0 then 1E-24 else b4 end),
        b5 = (case when b5 = 0 then 1E-24 else b5 end),
        local_street = local_street / 5280,
        major_street = (major_street / 5280) ^ 0.5,
        far_nonres = (case when far_nonres = 0 then 0 else ((bldg_sqft_ret1 + bldg_sqft_off) / (far_nonres * 43560)) ^ 0.25 end),
        rail_any = (case when rail_any > 0 then 1 else 0 end),
        freeway_arterial_any = (case when freeway_arterial_any <500 then 1 else 0 end),
        transit_distance = transit_distance ^ 0.25,
        transit_count = transit_count ^ 0.5,
        school_distance = school_distance ^ 0.25,
        retail_distance = retail_distance ^ 0.5,
        intersection_density_sqmi = intersection_density_sqmi ^ 0.5,
        park_open_space_distance = park_open_space_distance ^ 0.5,
        acres_parcel_park_open_space = acres_parcel_park_open_space ^ 0.25,
        du_variable = du_variable ^ 0.25,
        emp_variable = emp_variable ^ 0.25
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)

    # Stage 2: du_1km_tr is transformed only now because resmix_dens above
    # consumed the raw value; 'a' is the entropy denominator for stage 3
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
    du_1km_tr = du_1km_tr ^ 0.25,
    a = b1 + b2 + b3 + b4 + b5
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)

    # Stage 3: normalized 5-category entropy (Shannon, base ln(5)), zeroed
    # outside (0.0001, 0.999) and square-rooted otherwise.
    # NOTE(review): values above 0.999 are zeroed like values below 0.0001
    # rather than clamped to 1 — confirm this is the intended model behavior.
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
    mix5 = (case
          when a > 0 and ((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a)) / (-1 * ln(5)) < 0.0001
            then 0
          when a > 0 and ((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a)) / (-1 * ln(5)) > 0.999
            then 0
          when a > 0 and ((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a)) / (-1 * ln(5)) > 0.0001 and
            ((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a)) / (-1 * ln(5)) < 0.999
            then ((((b1/a)*LN(b1/a) + (b2/a)*LN(b2/a) + (b3/a)*LN(b3/a) + (b4/a)*LN(b4/a) + (b5/a)*LN(b5/a))) / (-1 * ln(5))) ^ 0.5
          else 0
          end)
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)

    ##CALCULATING PRELIMINARY INDEX VARIABLES
    # Weighted sums of standardized ((x - mean) / sd) transformed variables
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
        res_index = (resmix_dens - 1.82) / 0.9478 + (du_1km_tr - 7.451) / 1.8664,
        com_index = 2 * ((bldg_sqft_ret - 24.38) / 10.4543) + 2 * ((far_nonres - 0.5938) / 0.2064) - (retail_distance - 13.91) / 13.5014 - (restaurant_distance - 599.4) / 670.6584,
        park_access = (acres_parcel_park_open_space - 1.704) / 1.0956 - (park_open_space_distance - 18.48) / 11.3701,
        regional_access = (du_variable - 24.92) / 6.0667 + (emp_variable - 26.13) / 7.2701,
        network_index = (intersection_density_sqmi - 9.186) / 3.5870 + (local_street - 29.73) / 10.5599,
        transit_access = 2*((transit_count - 3.769) / 3.9104) - (transit_distance - 4.602) / 1.8653,
        major_road_index = (major_street - 0.6099) / 0.8575 + (freeway_arterial_any - 0.2126) / 0.4092
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)

    ##CALCULATING WALK INDEX VARIABLE
    # Composite of the stage-4 preliminary indices and mix5
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
        walk_index = 2 * ((res_index - 0.000221) / 1.9586) + 1.5 * ((network_index - 1.65E-16) / 1.846573714) + (com_index - 0.0005076) / 5.3122 + 0.5*((mix5 - 0.4379) / 0.1840)
    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)

    ##CALCULATING WALK INDEX VARIATIONS
    # One walk-index variant per downstream outcome model (CHTS/CHIS), each a
    # differently weighted blend of walk_index with transit/rail/regional access
    pSql = '''
    update {public_health_variables_schema}.{public_health_variables_table} a set
      walk_index_chts_senior_walk_any = (12.69/6.59) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_senior_auto_min = (7.09/2.63) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_teens_walk_any = (8.24/3.34) * ((walk_index - 0.0005914) / 4.4684) + (4.91/3.34) * ((transit_access - 2.727E-16) / 2.8399) + (rail_any - 0.09589) / 0.2944,
      walk_index_chts_child_walk_any = (8.94/5.81) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_adult_bike_any = (8.22/2.19) * ((walk_index - 0.0005914) / 4.4684) + (regional_access) / 1.9944,
      walk_index_chts_adult_walk_min = (9.79/5.56) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_senior_walk_min = (2.43/2.34) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_teens_walk_min = (5.12/2.42) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chts_adult_auto_min = (7.99/2.11) * ((walk_index - 0.0005914) / 4.4684) + (rail_any - 0.09589) / 0.2944,
      walk_index_chis_adult_modPA_any = (8.57/2.94) * ((walk_index - 0.0005914) / 4.4684) + (4.36/2.94) * ((transit_access - 2.727E-16) / 2.8399) + ((rail_any - 0.09589) / 0.2944) + (3.22/2.94) * ((regional_access) / 1.9944),
      walk_index_chis_adult_overweight = (walk_index - 0.0005914) / 4.4684 + (7.92/3.15) * ((regional_access) / 1.9944),
      walk_index_chis_senior_overweight = (walk_index - 0.0005914) / 4.4684 + (3.15/2.01) * ((regional_access) / 1.9944),
      walk_index_chis_adult_obese = (walk_index - 0.0005914) / 4.4684 + (6.07/2.58) * ((regional_access) / 1.9944),
      walk_index_chis_senior_gh = (2.74/2.14) * ((walk_index - 0.0005914) / 4.4684) + (regional_access) / 1.9944,
      walk_index_chis_senior_walk_le_min = (2.82/2.08) * ((walk_index - 0.0005914) / 4.4684) + (transit_access - 2.727E-16) / 2.8399,
      walk_index_chis_adult_modPA_min = (7.96/1.98) * ((walk_index - 0.0005914) / 4.4684) + (4.32/1.98) * ((transit_access - 2.727E-16) / 2.8399) + (rail_any - 0.09589) / 0.2944,
      walk_index_chis_adult_bmi = (walk_index - 0.0005914) / 4.4684 + (4.47/3.50) * ((transit_access - 2.727E-16) / 2.8399) + (8.94/3.50) * ((regional_access) / 1.9944),
      walk_index_chis_child_PA60 =(4.25/2.12) * ((walk_index - 0.0005914) / 4.4684) + (regional_access) / 1.9944,
      walk_index_chis_child_walk_school = (8.13/3.17)*((walk_index - 0.0005914) / 4.4684) + (regional_access) / 1.9944

    where pop > 0;
    '''.format(**sql_config_dict)

    execute_sql(pSql)