Example #1
    def run_analysis(self, **kwargs):

        start_time = time.time()
        self.report_progress(0.1, **kwargs)

        self.climate_zone_class = self.config_entity.db_entity_feature_class(DbEntityKey.CLIMATE_ZONES)
        self.energy_class = self.config_entity.db_entity_feature_class(DbEntityKey.ENERGY)
        self.base_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
        self.rel_table = parse_schema_and_table(self.energy_class._meta.db_table)[1]
        self.rel_column = self.energy_class._meta.parents.values()[0].column

        options = dict(
            energy_result_table=self.energy_class.db_entity_key,
            energy_schema=parse_schema_and_table(self.energy_class._meta.db_table)[0],
            base_table=self.base_class.db_entity_key,
            base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
        )
        logger.info("Running Energy Updater Tool with options %s" % options)

        if isinstance(self.config_entity.subclassed, FutureScenario):
            self.end_state_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
            logger.info("Running Future Calculations")
            energy_output_list, options = self.run_future_calculations(options, **kwargs)
        else:
            logger.info("Running Base Calculations")
            energy_output_list, options = self.run_base_calculations(options, **kwargs)

        logger.info("Writing to feature table {energy_schema}.{energy_result_table}".format(**options))
        self.write_results_to_database(options, energy_output_list)

        logger.info("Writing to relation table {energy_schema}.{rel_table}".format(
            energy_schema=options['energy_schema'],
            rel_table=self.rel_table))

        updated = datetime.datetime.now()
        truncate_table(options['energy_schema'] + '.' + self.rel_table)

        pSql = '''
        insert into {energy_schema}.{rel_table} ({rel_column}, updated)
        select id, '{updated}' from {energy_schema}.{energy_result_table};'''.format(
            energy_schema=options['energy_schema'],
            energy_result_table=options['energy_result_table'],
            rel_table=self.rel_table,
            rel_column=self.rel_column,
            updated=updated)

        execute_sql(pSql)

        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(self.config_entity, self.config_entity.computed_db_entities(key=DbEntityKey.ENERGY)[0])
        self.report_progress(0.2, **kwargs)

        logger.info('Finished: ' + str(time.time() - start_time))
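
Every example on this page leans on parse_schema_and_table. Its implementation is not shown here, so as a reference point, here is a minimal sketch of the behavior the call sites assume (splitting a possibly double-quoted "schema.table" name into its two parts); this is an assumption, not the actual UrbanFootprint code:

def parse_schema_and_table(full_table_name):
    """Sketch: split 'my_schema.my_table' or '"my_schema"."my_table"' into
    ('my_schema', 'my_table'), defaulting the schema to 'public'."""
    parts = full_table_name.split('.')
    if len(parts) == 1:
        schema, table = 'public', parts[0]
    else:
        schema, table = parts[0], parts[1]
    # Strip double quotes left over from SQL identifier quoting
    return schema.strip('"'), table.strip('"')

# parse_schema_and_table('"scenario_a"."energy"') -> ('scenario_a', 'energy')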
Example #3
def drop_layer_selection_table(layer_selection_class):
    """
        Drop the dynamic LayerSelection class table. This should be called whenever the owning layer is deleted
    :param layer_selection_class:
    :return:
    """
    if InformationSchema.objects.table_exists(*parse_schema_and_table(layer_selection_class._meta.db_table)):
        drop_tables_for_dynamic_classes(layer_selection_class)
def create_tables_for_dynamic_classes(*model_classes):
    """
        Creates the table for the dynamic model class if needed
    :param model_classes: 0 or more model classes for which to create a table
    :return:
    """
    for model_class in model_classes:
        if dynamic_model_table_exists(model_class):
            continue

        #info = "Model class table {model_class} doesn't exist -- creating it \n"
        #logger.debug(info.format(model_class=model_class._meta.db_table))

        fields = [(f.name, f) for f in model_class._meta.local_fields]
        table_name = model_class._meta.db_table
        db.create_table(table_name, fields)

        # some fields (eg GeoDjango) require additional SQL to be executed
        # Because of the poor Django/GeoDjango support for schemas, we have to manipulate the GeoDjango sql here so that the table is resolved to the correct schema, sigh
        if len(table_name.split('.')) == 2:
            schema, table = parse_schema_and_table(table_name)
            for i, sql in enumerate(db.deferred_sql):
                # Replace the POSTGIS single argument with two arguments
                # TODO this stupidly assumes that all deferred sql is POSTGIS
                # Substitution for '"schema"."table"' to 'schema','table'. This is for AddGeometryColumn
                db.deferred_sql[i] = re.sub(
                    "'{0}'".format(table_name),
                    "'{0}','{1}'".format(schema, table), sql)
                # Substitution for "schema"."table" to schema.table. This is for CREATE INDEX
                db.deferred_sql[i] = re.sub("{0}".format(table_name),
                                            "{0}.{1}".format(schema, table),
                                            db.deferred_sql[i])
                # Substitution for "schema"."tableHEX". Some indexes add random hex to the table name inside the double quotes. They may also truncate the table name, so just capture everything between "s
                # Also truncate to 64 characters the schema name minus the length of the table name, favoring the end of the schema which is most unique
                db.deferred_sql[i] = re.sub(r'"(".*)"\."(.*") ON',
                                            r'\1.\2 ON',
                                            db.deferred_sql[i])
                # Last ditch effort to remove extra " when we can't match generated index
                db.deferred_sql[i] = re.sub(r'""', r'"', db.deferred_sql[i])
                if string.find(db.deferred_sql[i], 'CREATE INDEX') == 0:
                    subs = db.deferred_sql[i]
                    # Truncate the index name. This could be done more elegantly
                    # db.deferred_sql[i] = subs[0:14] + subs[14:string.index(subs, '" ON')][-63:] + subs[string.index(subs, '" ON'):]
                    db.deferred_sql[i] = subs[0:14] + table + '_' + re.findall(
                        r'"([^"]*)"',
                        subs)[1] + subs[string.index(subs, '" ON'):]

        try:
            db.execute_deferred_sql()
        except Exception, e:
            raise Exception(
                "The table {table_name} was not created. Original exception: {message}. Deferred sql calls: {sql}"
                .format(table_name=model_class._meta.db_table,
                        message=e.message,
                        sql='\n'.join(db.deferred_sql)))
        # TODO I don't know if or when this is needed.
        if transaction.is_managed():
            transaction.commit()
def drop_tables_for_dynamic_classes(*model_classes):
    for model_class in model_classes:
        full_table_name = model_class._meta.db_table
        schema, table = parse_schema_and_table(full_table_name)
        if InformationSchema.objects.table_exists(schema, table):
            try:
                logger.info("Dropping table %s" % full_table_name)
                db.delete_table(full_table_name)
            except DatabaseError, e:
                raise Exception('full_table_name: {full_table_name}. Original exception: {e}'.format(full_table_name=full_table_name, e=e))
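
The deferred-SQL rewriting in create_tables_for_dynamic_classes above is easier to follow in isolation. A sketch with a hypothetical AddGeometryColumn statement (the table and column names are made up; the substitution mirrors the first re.sub above):

import re

# Hypothetical dynamic-model table name; db_table carries embedded quotes
# when a schema is baked into it as in the code above.
table_name = '"my_schema"."my_table"'
schema, table = 'my_schema', 'my_table'  # what parse_schema_and_table returns

sql = "SELECT AddGeometryColumn('%s', 'wkb_geometry', 4326, 'GEOMETRY', 2)" % table_name

# '"schema"."table"' -> 'schema','table', giving AddGeometryColumn separate
# schema and table arguments instead of one quoted name
sql = re.sub("'{0}'".format(table_name),
             "'{0}','{1}'".format(schema, table), sql)
# sql == "SELECT AddGeometryColumn('my_schema','my_table', 'wkb_geometry', 4326, 'GEOMETRY', 2)"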
Example #6
    def describe_table_columns(self, cursor, full_table_name):
        schema, table = parse_schema_and_table(full_table_name)
        # conn = psycopg2.connect(**pg_connection_parameters(settings.DATABASES['default']))
        # conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        # cursor = conn.cursor()
        cursor.execute("""
            SELECT column_name, is_nullable
            FROM information_schema.columns
            WHERE table_name = %s and table_schema = %s""", [table, schema])
        # NOTE: null_map is computed but unused here; unlike get_table_description
        # (Example #10), this method returns the raw cursor.description
        null_map = dict(cursor.fetchall())
        cursor.execute('SELECT * FROM "{schema}"."{table}" LIMIT 1'.format(schema=schema, table=table))
        return cursor.description
Example #10
    def get_table_description(self, cursor, full_table_name):
        """
            Override the parent method to take schemas into account, sigh
        :param cursor:
        :param full_table_name:
        :return:
        """
        schema, table = parse_schema_and_table(full_table_name)
        # conn = psycopg2.connect(**pg_connection_parameters(settings.DATABASES['default']))
        # conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        # cursor = conn.cursor()
        cursor.execute("""
            SELECT column_name, is_nullable
            FROM information_schema.columns
            WHERE table_name = %s and table_schema = %s""", [table, schema])
        null_map = dict(cursor.fetchall())
        cursor.execute('SELECT * FROM "{schema}"."{table}" LIMIT 1'.format(schema=schema, table=table))
        return [tuple([item for item in line[:6]] + [null_map[line[0]] == u'YES'])
                for line in cursor.description]
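
Each element of cursor.description is a DB-API 7-tuple (name, type_code, display_size, internal_size, precision, scale, null_ok); the override above keeps the first six slots and replaces null_ok with the answer from information_schema, which is reliable across schemas. Traced for one hypothetical column:

# line is one cursor.description entry; null_map maps column name -> 'YES'/'NO'
line = ('emp', 1700, None, None, 14, 4, None)  # hypothetical numeric column
null_map = {'emp': u'YES'}

described = tuple([item for item in line[:6]] + [null_map[line[0]] == u'YES'])
# described == ('emp', 1700, None, None, 14, 4, True)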
Example #11
    def run_water_calculations(self, **kwargs):
        self.format_policy_inputs()

        self.water_class = self.config_entity.db_entity_feature_class(DbEntityKey.WATER)
        self.base_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
        self.climate_zone_class = self.config_entity.db_entity_feature_class(DbEntityKey.CLIMATE_ZONES)
        self.rel_table = parse_schema_and_table(self.water_class._meta.db_table)[1]
        self.rel_column = self.water_class._meta.parents.values()[0].column

        if isinstance(self.config_entity.subclassed, FutureScenario):
            self.report_progress(0.2, **kwargs)
            self.end_state_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)

            water_output_list, options = self.run_future_water_calculations(**kwargs)
        else:
            self.report_progress(0.2, **kwargs)
            water_output_list, options = self.run_base_water_calculations()
            self.report_progress(0.7, **kwargs)

        self.write_water_results_to_database(options, water_output_list)

        updated = datetime.datetime.now()
        truncate_table(options['water_schema'] + '.' + self.rel_table)

        pSql = '''
        insert into {water_schema}.{rel_table} ({rel_column}, updated) select id, '{updated}' from {water_schema}.{water_result_table};'''.format(
            water_schema=options['water_schema'],
            water_result_table=options['water_result_table'],
            rel_table=self.rel_table,
            rel_column=self.rel_column,
            updated=updated)

        execute_sql(pSql)

        from footprint.main.publishing.data_import_publishing import create_and_populate_relations
        create_and_populate_relations(self.config_entity, self.config_entity.computed_db_entities(key=DbEntityKey.WATER)[0])
        self.report_progress(0.10000001, **kwargs)
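
The rel-table refresh above (and the identical pattern in run_analysis from Example #1) always renders to the same two-step shape: truncate the relation table, then repoint every result row at its parent with a shared timestamp. With hypothetical names substituted, the generated SQL is easy to read off:

import datetime

pSql = '''
insert into {water_schema}.{rel_table} ({rel_column}, updated) select id, '{updated}' from {water_schema}.{water_result_table};'''.format(
    water_schema='scenario_a',          # hypothetical schema
    water_result_table='water',         # hypothetical result table
    rel_table='water_rel',              # hypothetical relation table
    rel_column='water_feature_ptr_id',  # hypothetical parent-pointer column
    updated=datetime.datetime(2014, 1, 1))
# insert into scenario_a.water_rel (water_feature_ptr_id, updated)
#   select id, '2014-01-01 00:00:00' from scenario_a.water;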
    def get_indexes(self, cursor, table_name):
        """
            OVERRIDDEN to work with schemas, sigh

        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
            {'primary_key': boolean representing whether it's the primary key,
             'unique': boolean representing whether it's a unique index}
        """
        schema, table = parse_schema_and_table(table_name)
        # This query retrieves each index on the given table, including the
        # first associated field name
        cursor.execute(
            """
            SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
            FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
                pg_catalog.pg_index idx, pg_catalog.pg_attribute attr,
                information_schema.columns isc
            WHERE c.oid = idx.indrelid
                AND idx.indexrelid = c2.oid
                AND attr.attrelid = c.oid
                AND attr.attnum = idx.indkey[0]
                AND c.relname = %s
                AND c.relname = isc.table_name
                AND isc.table_schema = %s
                AND isc.column_name = attr.attname
                """, [table, schema])
        indexes = {}
        for row in cursor.fetchall():
            # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
            # a string of space-separated integers. This designates the field
            # indexes (1-based) of the fields that have indexes on the table.
            # Here, we skip any indexes across multiple fields.
            if ' ' in row[1]:
                continue
            indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
        return indexes
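
The indkey test above is the entire multi-column filter: pg_index.indkey comes back as a space-separated string of 1-based column positions, so any embedded space marks a composite index, which get_indexes skips. With hypothetical rows in the shape the query returns:

# (attname, indkey, indisunique, indisprimary)
rows = [
    ('id', '1', True, True),      # single-column primary key -> kept
    ('hh', '3 4', False, False),  # composite index -> skipped
]

indexes = {}
for row in rows:
    if ' ' in row[1]:
        continue
    indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
# indexes == {'id': {'primary_key': True, 'unique': True}}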
Example #14
    def get_fields(cls, table_name):
        """
            Modifies the original handle_inspection to instead get all the fields of the given table
        :param table_name:
        :return: A dict keyed by field name and valued by models.Field or variant instance
        """

        connection = connections['default']

        table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')

        cursor = connection.cursor()

        try:
            relations = connection.introspection.get_relations(cursor, table_name)
        except NotImplementedError:
            relations = {}
        try:
            indexes = connection.introspection.get_indexes(cursor, table_name)
        except NotImplementedError:
            indexes = {}

        schema, table = parse_schema_and_table(table_name)
        # Fill this dict with field definitions
        fields = {}
        for i, row in enumerate(cls.get_table_description(cursor, table, schema, connection)):
            column_name = row[0]
            att_name = column_name.lower()
            comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
            extra_params = {}  # Holds Field parameters such as 'db_column'.

            # If the column name can't be used verbatim as a Python
            # attribute, set the "db_column" for this Field.
            if ' ' in att_name or '-' in att_name or keyword.iskeyword(att_name) or column_name != att_name:
                extra_params['db_column'] = column_name

            # Add primary_key and unique, if necessary.
            if column_name in indexes:
                if indexes[column_name]['primary_key']:
                    extra_params['primary_key'] = True
                elif indexes[column_name]['unique']:
                    extra_params['unique'] = True

            # Modify the field name to make it Python-compatible.
            if ' ' in att_name:
                att_name = att_name.replace(' ', '_')
                comment_notes.append('Field renamed to remove spaces.')

            if '-' in att_name:
                att_name = att_name.replace('-', '_')
                comment_notes.append('Field renamed to remove dashes.')

            if column_name != att_name:
                comment_notes.append('Field name made lowercase.')

            if i in relations:
                rel_to = relations[i][1] == table_name and "'cls'" or table2model(relations[i][1])
                field_type = 'ForeignKey(%s' % rel_to
                if att_name.endswith('_id'):
                    att_name = att_name[:-3]
                else:
                    extra_params['db_column'] = column_name
            else:
                # Calling `get_field_type` to get the field type string and any
                # additional paramters and notes.
                field_type, field_params, field_notes = cls.get_field_type(connection, table_name, row)
                extra_params.update(field_params)
                comment_notes.extend(field_notes)

                field_type += '('

            if keyword.iskeyword(att_name):
                att_name += '_field'
                comment_notes.append('Field renamed because it was a Python reserved word.')

            if att_name[0].isdigit():
                att_name = 'number_%s' % att_name
                extra_params['db_column'] = unicode(column_name)
                comment_notes.append("Field renamed because it wasn't a "
                                     "valid Python identifier.")

            # Don't output 'id = meta.AutoField(primary_key=True)', because
            # that's assumed if it doesn't exist.
            if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
                continue

            # Add 'null' and 'blank', if the 'null_ok' flag was present in the
            # table description.
            if row[6]: # If it's NULL...
                extra_params['blank'] = True
                # Don't know why these are here, commenting out
                #if not field_type in ('TextField(', 'CharField('):
                extra_params['null'] = True

            field_desc = 'models.%s' % field_type
            if extra_params:
                if not field_desc.endswith('('):
                    field_desc += ', '
                field_desc += ', '.join(['%s=%r' % (k, v) for k, v in extra_params.items()])
            field_desc += ')'
            if comment_notes:
                field_desc += ' # ' + ' '.join(comment_notes)
            # Set the dict key/value to the field name and the evaluated field description
            fields[att_name] = eval(field_desc)
            fields[att_name].name = att_name
        return fields
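
get_fields ends by eval-ing each generated field description, so the returned dict holds live Django Field instances rather than source strings; this is only reasonable because every interpolated value comes from database introspection, not user input. What one iteration might assemble for a hypothetical nullable "land use" column (the embedded space forces db_column):

from django.db import models

field_desc = "models.CharField(db_column=u'land use', blank=True, null=True)"
field = eval(field_desc)  # a models.CharField instance, as in get_fields above
field.name = 'land_use'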
Example #15
    def update(self, **kwargs):

        # Make sure all related models have been created before querying
        logger.info("Executing Vmt using {0}".format(self.config_entity))

        self.vmt_progress(0.1, **kwargs)

        vmt_result_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT)
        vmt_variables_feature_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_VARIABLES)
        census_rates_feature_class = self.config_entity.db_entity_feature_class(DbEntityKey.CENSUS_RATES)

        if isinstance(self.config_entity.subclassed, FutureScenario):
            scenario_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
            trip_lengths_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_FUTURE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(DbEntityKey.FUTURE_TRANSIT_STOPS)
            is_future = True
        else:
            scenario_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
            trip_lengths_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_BASE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_TRANSIT_STOPS)
            is_future = False

        sql_config_dict = dict(
            vmt_result_table=vmt_result_class.db_entity_key,
            vmt_schema=parse_schema_and_table(vmt_result_class._meta.db_table)[0],
            uf_canvas_table=scenario_class.db_entity_key,
            uf_canvas_schema=parse_schema_and_table(scenario_class._meta.db_table)[0],
            census_rates_table=census_rates_feature_class.db_entity_key,
            census_rates_schema=parse_schema_and_table(census_rates_feature_class._meta.db_table)[0],
            trip_lengths_table=trip_lengths_class.db_entity_key,
            trip_lengths_schema=parse_schema_and_table(trip_lengths_class._meta.db_table)[0],
            vmt_variables_table=vmt_variables_feature_class.db_entity_key,
            vmt_variables_schema=parse_schema_and_table(vmt_variables_feature_class._meta.db_table)[0],
            vmt_rel_table=parse_schema_and_table(vmt_result_class._meta.db_table)[1],
            vmt_rel_column=vmt_result_class._meta.parents.values()[0].column,
            transit_stop_schema=parse_schema_and_table(transit_stop_class._meta.db_table)[0],
            transit_stop_table=transit_stop_class.db_entity_key,
            config_entity=self.config_entity
        )
        #
        if not kwargs.get('postprocess_only'):
            self.run_vmt_preprocesses(sql_config_dict, **kwargs)

        drop_table('{vmt_schema}.{vmt_result_table}'.format(**sql_config_dict))
        truncate_table('{vmt_schema}.{vmt_rel_table}'.format(**sql_config_dict))

        attribute_list = filter(lambda x: x != 'id', vmt_output_field_list)
        output_field_syntax = 'id int, ' + create_sql_calculations(attribute_list, '{0} numeric(14, 4)')

        pSql = '''
        create table {vmt_schema}.{vmt_result_table} ({output_field_syntax});'''.format(
            output_field_syntax=output_field_syntax, **sql_config_dict)
        execute_sql(pSql)

        trip_lengths = DbEntityKey.VMT_FUTURE_TRIP_LENGTHS if is_future else DbEntityKey.VMT_BASE_TRIP_LENGTHS
        total_employment = scenario_class.objects.aggregate(Sum('emp'))
        all_features = scenario_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))
        all_features_length = len(all_features)

        max_id = scenario_class.objects.all().order_by("-id")[0].id
        min_id = scenario_class.objects.all().order_by("id")[0].id

        # This section pulls data from Postgres into Python and holds it in memory before committing results
        # back to the database. To avoid exhausting memory on large datasets, the work is broken into jobs of
        # at most JOB_SIZE rows, each committed before the next begins, iterating until all rows are
        # calculated and committed.
        if all_features_length > self.JOB_SIZE:
            job_count = all_features_length / self.JOB_SIZE
            rows_per_range = (max_id - min_id) / job_count
        else:
            rows_per_range = max_id - min_id
            job_count = 1
        print 'Job Count: {0}'.format(job_count)
        start_id = min_id

        for i in range(job_count):
            if i == job_count - 1:
                end_id = max_id
            else:
                end_id = start_id + rows_per_range - 1
            logger.info('Job: {0}'.format(i))
            logger.info('Start Id: {0}'.format(start_id))
            logger.info('End Id: {0}'.format(end_id))

            vmt_output_list = []

            features = all_features.filter(id__range=(start_id, end_id))
            annotated_features = annotated_related_feature_class_pk_via_geographies(features, self.config_entity, [
                DbEntityKey.VMT_VARIABLES, DbEntityKey.CENSUS_RATES, DbEntityKey.VMT_FUTURE_TRIP_LENGTHS, DbEntityKey.VMT_BASE_TRIP_LENGTHS, trip_lengths])

            assert annotated_features.exists(), "VMT is about to process 0 results"

            failed_features = []

            for feature in annotated_features:
                trip_length_id = feature.vmt_future_trip_lengths if is_future else feature.vmt_base_trip_lengths
                try:
                    trip_lengths_feature = trip_lengths_class.objects.get(id=trip_length_id)
                except trip_lengths_class.DoesNotExist, e:
                    failed_features.append(feature)
                    logger.error('Cannot find trip lengths for geography with id = {0}'.format(feature.id))
                    continue

                vmt_variables_feature = vmt_variables_feature_class.objects.get(id=feature.vmt_variables)

                try:
                    census_rates_feature = census_rates_feature_class.objects.get(id=feature.census_rates)
                except census_rates_feature_class.DoesNotExist, e:
                    logger.error('Cannot find census rate with id = {0}'.format(feature.census_rates))
                    continue

                vmt_feature = dict(
                    id=int(feature.id),
                    acres_gross=float(feature.acres_gross) or 0,
                    acres_parcel=float(feature.acres_parcel) or 0,
                    acres_parcel_res=float(feature.acres_parcel_res) or 0,
                    acres_parcel_emp=float(feature.acres_parcel_emp) or 0,
                    acres_parcel_mixed=float(feature.acres_parcel_mixed_use) or 0,
                    intersections_qtrmi=float(feature.intersection_density_sqmi) or 0,
                    du=float(feature.du) or 0,
                    du_occupancy_rate=float(feature.hh / feature.du if feature.du else 0),
                    du_detsf=float(feature.du_detsf) or 0,
                    du_attsf=float(feature.du_attsf) or 0,

                    du_mf=float(feature.du_mf) or 0,
                    du_mf2to4=float(feature.du_mf2to4) or 0,
                    du_mf5p=float(feature.du_mf5p) or 0,
                    hh=float(feature.hh) or 0,
                    hh_avg_size=float(feature.pop / feature.hh if feature.hh > 0 else 0),
                    hh_avg_inc=float(census_rates_feature.hh_agg_inc_rate) or 0,

                    hh_inc_00_10=float(feature.hh * census_rates_feature.hh_inc_00_10_rate) or 0,
                    hh_inc_10_20=float(feature.hh * census_rates_feature.hh_inc_10_20_rate) or 0,
                    hh_inc_20_30=float(feature.hh * census_rates_feature.hh_inc_20_30_rate) or 0,
                    hh_inc_30_40=float(feature.hh * census_rates_feature.hh_inc_30_40_rate) or 0,
                    hh_inc_40_50=float(feature.hh * census_rates_feature.hh_inc_40_50_rate) or 0,
                    hh_inc_50_60=float(feature.hh * census_rates_feature.hh_inc_50_60_rate) or 0,
                    hh_inc_60_75=float(feature.hh * census_rates_feature.hh_inc_60_75_rate) or 0,
                    hh_inc_75_100=float(feature.hh * census_rates_feature.hh_inc_75_100_rate) or 0,
                    hh_inc_100p=float(feature.hh * (census_rates_feature.hh_inc_100_125_rate +
                                                                     census_rates_feature.hh_inc_125_150_rate +
                                                                     census_rates_feature.hh_inc_150_200_rate +
                                                                     census_rates_feature.hh_inc_200p_rate)) or 0,

                    pop=float(feature.pop) or 0,
                    pop_employed=float(feature.pop * census_rates_feature.pop_age16_up_rate *
                                       census_rates_feature.pop_employed_rate) or 0,
                    pop_age16_up=float(feature.pop * census_rates_feature.pop_age16_up_rate) or 0,
                    pop_age65_up=float(feature.pop * census_rates_feature.pop_age65_up_rate) or 0,

                    emp=float(feature.emp) or 0,
                    emp_retail=float(feature.emp_retail_services + feature.emp_other_services) or 0,
                    emp_restaccom=float(feature.emp_accommodation + feature.emp_restaurant) or 0,
                    emp_arts_entertainment=float(feature.emp_arts_entertainment) or 0,
                    emp_office=float(feature.emp_off) or 0,
                    emp_public=float(feature.emp_public_admin + feature.emp_education) or 0,
                    emp_industry=float(feature.emp_ind + feature.emp_ag) or 0,

                    emp_within_1mile=float(vmt_variables_feature.emp_1mile) or 0,
                    hh_within_quarter_mile_trans=1 if vmt_variables_feature.transit_1km > 0 else 0,

                    vb_acres_parcel_res_total=float(vmt_variables_feature.acres_parcel_res_vb) or 0,
                    vb_acres_parcel_emp_total=float(vmt_variables_feature.acres_parcel_emp_vb) or 0,
                    vb_acres_parcel_mixed_total=float(vmt_variables_feature.acres_parcel_mixed_use_vb) or 0,
                    vb_du_total=float(vmt_variables_feature.du_vb) or 0,
                    vb_pop_total=float(vmt_variables_feature.pop_vb) or 0,
                    vb_emp_total=float(vmt_variables_feature.emp_vb) or 0,
                    vb_emp_retail_total=float(vmt_variables_feature.emp_ret_vb) or 0,
                    vb_hh_total=float(vmt_variables_feature.hh_vb) or 0,
                    vb_du_mf_total=float(vmt_variables_feature.du_mf_vb) or 0,
                    vb_hh_inc_00_10_total=float(vmt_variables_feature.hh_inc_00_10_vb) or 0,
                    vb_hh_inc_10_20_total=float(vmt_variables_feature.hh_inc_10_20_vb) or 0,
                    vb_hh_inc_20_30_total=float(vmt_variables_feature.hh_inc_20_30_vb) or 0,
                    vb_hh_inc_30_40_total=float(vmt_variables_feature.hh_inc_30_40_vb) or 0,
                    vb_hh_inc_40_50_total=float(vmt_variables_feature.hh_inc_40_50_vb) or 0,
                    vb_hh_inc_50_60_total=float(vmt_variables_feature.hh_inc_50_60_vb) or 0,
                    vb_hh_inc_60_75_total=float(vmt_variables_feature.hh_inc_60_75_vb) or 0,
                    vb_hh_inc_75_100_total=float(vmt_variables_feature.hh_inc_75_100_vb) or 0,
                    vb_hh_inc_100p_total=float(vmt_variables_feature.hh_inc_100p_vb) or 0,

                    vb_pop_employed_total=float(vmt_variables_feature.pop_employed_vb) or 0,
                    vb_pop_age16_up_total=float(vmt_variables_feature.pop_age16_up_vb) or 0,
                    vb_pop_age65_up_total=float(vmt_variables_feature.pop_age65_up_vb) or 0,

                    emp30m_transit=float(trip_lengths_feature.emp_30min_transit) or 0,
                    emp45m_transit=float(trip_lengths_feature.emp_45min_transit) or 0,
                    prod_hbw=float(trip_lengths_feature.productions_hbw) or 0,
                    prod_hbo=float(trip_lengths_feature.productions_hbo) or 0,
                    prod_nhb=float(trip_lengths_feature.productions_nhb) or 0,
                    attr_hbw=float(trip_lengths_feature.attractions_hbw) or 0,
                    attr_hbo=float(trip_lengths_feature.attractions_hbo) or 0,
                    attr_nhb=float(trip_lengths_feature.attractions_nhb) or 0,

                    qmb_acres_parcel_res_total=float(vmt_variables_feature.acres_parcel_res_qtrmi) or 0,
                    qmb_acres_parcel_emp_total=float(vmt_variables_feature.acres_parcel_emp_qtrmi) or 0,
                    qmb_acres_parcel_mixed_total=float(vmt_variables_feature.acres_parcel_mixed_use_qtrmi) or 0,
                    qmb_du_total=float(vmt_variables_feature.du_qtrmi) or 0,
                    qmb_pop_total=float(vmt_variables_feature.pop_qtrmi) or 0,
                    qmb_emp_total=float(vmt_variables_feature.emp_qtrmi) or 0,
                    qmb_emp_retail=float(vmt_variables_feature.emp_ret_qtrmi) or 0,
                    hh_avg_veh=float(census_rates_feature.hh_agg_veh_rate) or 0,

                    truck_adjustment_factor=0.031,
                    total_employment=float(total_employment['emp__sum']) or 0)

                # run raw trip generation
                vmt_feature_trips = generate_raw_trips(vmt_feature)

                # run trip purpose splits
                vmt_feature_trip_purposes = calculate_trip_purpose_splits(vmt_feature_trips)

                # run log odds
                vmt_feature_log_odds = calculate_log_odds(vmt_feature_trip_purposes)

                # run vmt equations
                vmt_output = calculate_final_vmt_results(vmt_feature_log_odds)

                # filters the vmt feature dictionary for specific output fields for writing to the database
                output_list = map(lambda key: vmt_output[key], vmt_output_field_list)
                vmt_output_list.append(output_list)
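
The id-range batching above is worth tracing with concrete numbers. Under Python 2, / on ints is floor division, so with hypothetical values (a JOB_SIZE of 10000 and 25000 qualifying rows spread over ids 1 to 100000):

JOB_SIZE = 10000                # stands in for self.JOB_SIZE
all_features_length = 25000
min_id, max_id = 1, 100000

if all_features_length > JOB_SIZE:
    job_count = all_features_length // JOB_SIZE      # 2 jobs
    rows_per_range = (max_id - min_id) // job_count  # 49999 ids per job
else:
    rows_per_range = max_id - min_id
    job_count = 1

# Job 0 covers ids 1..49999; the last job always ends at max_id (100000),
# so nothing is dropped by the floor division.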
    def update(self, **kwargs):

        # Make sure all related models have been created before querying
        logger.info("Executing Vmt using {0}".format(self.config_entity))

        self.vmt_progress(0.1, **kwargs)

        vmt_result_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.VMT)
        vmt_variables_feature_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.VMT_VARIABLES)
        census_rates_feature_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.CENSUS_RATES)

        if isinstance(self.config_entity.subclassed, FutureScenario):
            scenario_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.END_STATE)
            trip_lengths_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.VMT_FUTURE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.FUTURE_TRANSIT_STOPS)
            is_future = True
        else:
            scenario_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.BASE_CANVAS)
            trip_lengths_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.VMT_BASE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(
                DbEntityKey.BASE_TRANSIT_STOPS)
            is_future = False

        sql_config_dict = dict(
            vmt_result_table=vmt_result_class.db_entity_key,
            vmt_schema=parse_schema_and_table(
                vmt_result_class._meta.db_table)[0],
            uf_canvas_table=scenario_class.db_entity_key,
            uf_canvas_schema=parse_schema_and_table(
                scenario_class._meta.db_table)[0],
            census_rates_table=census_rates_feature_class.db_entity_key,
            census_rates_schema=parse_schema_and_table(
                census_rates_feature_class._meta.db_table)[0],
            trip_lengths_table=trip_lengths_class.db_entity_key,
            trip_lengths_schema=parse_schema_and_table(
                trip_lengths_class._meta.db_table)[0],
            vmt_variables_table=vmt_variables_feature_class.db_entity_key,
            vmt_variables_schema=parse_schema_and_table(
                vmt_variables_feature_class._meta.db_table)[0],
            vmt_rel_table=parse_schema_and_table(
                vmt_result_class._meta.db_table)[1],
            vmt_rel_column=vmt_result_class._meta.parents.values()[0].column,
            transit_stop_schema=parse_schema_and_table(
                transit_stop_class._meta.db_table)[0],
            transit_stop_table=transit_stop_class.db_entity_key,
            config_entity=self.config_entity)
        #
        if not kwargs.get('postprocess_only'):
            self.run_vmt_preprocesses(sql_config_dict, **kwargs)

        drop_table('{vmt_schema}.{vmt_result_table}'.format(**sql_config_dict))
        truncate_table(
            '{vmt_schema}.{vmt_rel_table}'.format(**sql_config_dict))

        attribute_list = filter(lambda x: x != 'id', vmt_output_field_list)
        output_field_syntax = 'id int, ' + create_sql_calculations(
            attribute_list, '{0} numeric(14, 4)')

        pSql = '''
        create table {vmt_schema}.{vmt_result_table} ({output_field_syntax});'''.format(
            output_field_syntax=output_field_syntax, **sql_config_dict)
        execute_sql(pSql)

        trip_lengths = DbEntityKey.VMT_FUTURE_TRIP_LENGTHS if is_future else DbEntityKey.VMT_BASE_TRIP_LENGTHS
        total_employment = scenario_class.objects.aggregate(Sum('emp'))
        all_features = scenario_class.objects.filter(
            Q(du__gt=0) | Q(emp__gt=0))
        all_features_length = len(all_features)

        max_id = scenario_class.objects.all().order_by("-id")[0].id
        min_id = scenario_class.objects.all().order_by("id")[0].id

        # This section of the model passes data from POSTGRES into Python and is saved in memory before being committed
        # back to the database. In order to not use all memory with large datasets, jobs are broken up with a maximum
        # job size of JOB_SIZE rows before being committed to the database. It will iterate through until all rows are
        # calculated and committed.
        if all_features_length > self.JOB_SIZE:
            job_count = all_features_length / self.JOB_SIZE
            rows_per_range = (max_id - min_id) / job_count
        else:
            rows_per_range = max_id - min_id
            job_count = 1
        print 'Job Count: {0}'.format(job_count)
        start_id = min_id

        for i in range(job_count):
            if i == job_count - 1:
                end_id = max_id
            else:
                end_id = start_id + rows_per_range - 1
            logger.info('Job: {0}'.format(i))
            logger.info('Start Id: {0}'.format(start_id))
            logger.info('End Id: {0}'.format(end_id))

            vmt_output_list = []

            features = all_features.filter(id__range=(start_id, end_id))
            annotated_features = annotated_related_feature_class_pk_via_geographies(
                features, self.config_entity, [
                    DbEntityKey.VMT_VARIABLES, DbEntityKey.CENSUS_RATES,
                    DbEntityKey.VMT_FUTURE_TRIP_LENGTHS,
                    DbEntityKey.VMT_BASE_TRIP_LENGTHS, trip_lengths
                ])

            assert annotated_features.exists(
            ), "VMT is about to process 0 results"

            failed_features = []

            for feature in annotated_features:
                trip_length_id = feature.vmt_future_trip_lengths if is_future else feature.vmt_base_trip_lengths
                try:
                    trip_lengths_feature = trip_lengths_class.objects.get(
                        id=trip_length_id)
                except trip_lengths_class.DoesNotExist, e:
                    failed_features.append(feature)
                    logger.error(
                        'Cannot find trip lengths for geography with id = {0}'.
                        format(feature.id))
                    continue

                vmt_variables_feature = vmt_variables_feature_class.objects.get(
                    id=feature.vmt_variables)

                try:
                    census_rates_feature = census_rates_feature_class.objects.get(
                        id=feature.census_rates)
                except census_rates_feature_class.DoesNotExist, e:
                    logger.error(
                        'Cannot find census rate with id = {0}'.format(
                            feature.census_rates))
                    continue

                vmt_feature = dict(
                    id=int(feature.id),
                    acres_gross=float(feature.acres_gross) or 0,
                    acres_parcel=float(feature.acres_parcel) or 0,
                    acres_parcel_res=float(feature.acres_parcel_res) or 0,
                    acres_parcel_emp=float(feature.acres_parcel_emp) or 0,
                    acres_parcel_mixed=float(feature.acres_parcel_mixed_use)
                    or 0,
                    intersections_qtrmi=float(
                        feature.intersection_density_sqmi) or 0,
                    du=float(feature.du) or 0,
                    du_occupancy_rate=float(feature.hh /
                                            feature.du if feature.du else 0),
                    du_detsf=float(feature.du_detsf) or 0,
                    du_attsf=float(feature.du_attsf) or 0,
                    du_mf=float(feature.du_mf) or 0,
                    du_mf2to4=float(feature.du_mf2to4) or 0,
                    du_mf5p=float(feature.du_mf5p) or 0,
                    hh=float(feature.hh) or 0,
                    hh_avg_size=float(feature.pop /
                                      feature.hh if feature.hh > 0 else 0),
                    hh_avg_inc=float(census_rates_feature.hh_agg_inc_rate)
                    or 0,
                    hh_inc_00_10=float(
                        feature.hh * census_rates_feature.hh_inc_00_10_rate)
                    or 0,
                    hh_inc_10_20=float(
                        feature.hh * census_rates_feature.hh_inc_10_20_rate)
                    or 0,
                    hh_inc_20_30=float(
                        feature.hh * census_rates_feature.hh_inc_20_30_rate)
                    or 0,
                    hh_inc_30_40=float(
                        feature.hh * census_rates_feature.hh_inc_30_40_rate)
                    or 0,
                    hh_inc_40_50=float(
                        feature.hh * census_rates_feature.hh_inc_40_50_rate)
                    or 0,
                    hh_inc_50_60=float(
                        feature.hh * census_rates_feature.hh_inc_50_60_rate)
                    or 0,
                    hh_inc_60_75=float(
                        feature.hh * census_rates_feature.hh_inc_60_75_rate)
                    or 0,
                    hh_inc_75_100=float(
                        feature.hh * census_rates_feature.hh_inc_75_100_rate)
                    or 0,
                    hh_inc_100p=float(
                        feature.hh *
                        (census_rates_feature.hh_inc_100_125_rate +
                         census_rates_feature.hh_inc_125_150_rate +
                         census_rates_feature.hh_inc_150_200_rate +
                         census_rates_feature.hh_inc_200p_rate)) or 0,
                    pop=float(feature.pop) or 0,
                    pop_employed=float(
                        feature.pop * census_rates_feature.pop_age16_up_rate *
                        census_rates_feature.pop_employed_rate) or 0,
                    pop_age16_up=float(
                        feature.pop * census_rates_feature.pop_age16_up_rate)
                    or 0,
                    pop_age65_up=float(
                        feature.pop * census_rates_feature.pop_age65_up_rate)
                    or 0,
                    emp=float(feature.emp) or 0,
                    emp_retail=float(feature.emp_retail_services +
                                     feature.emp_other_services) or 0,
                    emp_restaccom=float(feature.emp_accommodation +
                                        feature.emp_restaurant) or 0,
                    emp_arts_entertainment=float(
                        feature.emp_arts_entertainment) or 0,
                    emp_office=float(feature.emp_off) or 0,
                    emp_public=float(feature.emp_public_admin +
                                     feature.emp_education) or 0,
                    emp_industry=float(feature.emp_ind + feature.emp_ag) or 0,
                    emp_within_1mile=float(vmt_variables_feature.emp_1mile)
                    or 0,
                    hh_within_quarter_mile_trans=1
                    if vmt_variables_feature.transit_1km > 0 else 0,
                    vb_acres_parcel_res_total=float(
                        vmt_variables_feature.acres_parcel_res_vb) or 0,
                    vb_acres_parcel_emp_total=float(
                        vmt_variables_feature.acres_parcel_emp_vb) or 0,
                    vb_acres_parcel_mixed_total=float(
                        vmt_variables_feature.acres_parcel_mixed_use_vb) or 0,
                    vb_du_total=float(vmt_variables_feature.du_vb) or 0,
                    vb_pop_total=float(vmt_variables_feature.pop_vb) or 0,
                    vb_emp_total=float(vmt_variables_feature.emp_vb) or 0,
                    vb_emp_retail_total=float(vmt_variables_feature.emp_ret_vb)
                    or 0,
                    vb_hh_total=float(vmt_variables_feature.hh_vb) or 0,
                    vb_du_mf_total=float(vmt_variables_feature.du_mf_vb) or 0,
                    vb_hh_inc_00_10_total=float(
                        vmt_variables_feature.hh_inc_00_10_vb) or 0,
                    vb_hh_inc_10_20_total=float(
                        vmt_variables_feature.hh_inc_10_20_vb) or 0,
                    vb_hh_inc_20_30_total=float(
                        vmt_variables_feature.hh_inc_20_30_vb) or 0,
                    vb_hh_inc_30_40_total=float(
                        vmt_variables_feature.hh_inc_30_40_vb) or 0,
                    vb_hh_inc_40_50_total=float(
                        vmt_variables_feature.hh_inc_40_50_vb) or 0,
                    vb_hh_inc_50_60_total=float(
                        vmt_variables_feature.hh_inc_50_60_vb) or 0,
                    vb_hh_inc_60_75_total=float(
                        vmt_variables_feature.hh_inc_60_75_vb) or 0,
                    vb_hh_inc_75_100_total=float(
                        vmt_variables_feature.hh_inc_75_100_vb) or 0,
                    vb_hh_inc_100p_total=float(
                        vmt_variables_feature.hh_inc_100p_vb) or 0,
                    vb_pop_employed_total=float(
                        vmt_variables_feature.pop_employed_vb) or 0,
                    vb_pop_age16_up_total=float(
                        vmt_variables_feature.pop_age16_up_vb) or 0,
                    vb_pop_age65_up_total=float(
                        vmt_variables_feature.pop_age65_up_vb) or 0,
                    emp30m_transit=float(
                        trip_lengths_feature.emp_30min_transit) or 0,
                    emp45m_transit=float(
                        trip_lengths_feature.emp_45min_transit) or 0,
                    prod_hbw=float(trip_lengths_feature.productions_hbw) or 0,
                    prod_hbo=float(trip_lengths_feature.productions_hbo) or 0,
                    prod_nhb=float(trip_lengths_feature.productions_nhb) or 0,
                    attr_hbw=float(trip_lengths_feature.attractions_hbw) or 0,
                    attr_hbo=float(trip_lengths_feature.attractions_hbo) or 0,
                    attr_nhb=float(trip_lengths_feature.attractions_nhb) or 0,
                    qmb_acres_parcel_res_total=float(
                        vmt_variables_feature.acres_parcel_res_qtrmi) or 0,
                    qmb_acres_parcel_emp_total=float(
                        vmt_variables_feature.acres_parcel_emp_qtrmi) or 0,
                    qmb_acres_parcel_mixed_total=float(
                        vmt_variables_feature.acres_parcel_mixed_use_qtrmi)
                    or 0,
                    qmb_du_total=float(vmt_variables_feature.du_qtrmi) or 0,
                    qmb_pop_total=float(vmt_variables_feature.pop_qtrmi) or 0,
                    qmb_emp_total=float(vmt_variables_feature.emp_qtrmi) or 0,
                    qmb_emp_retail=float(vmt_variables_feature.emp_ret_qtrmi)
                    or 0,
                    hh_avg_veh=float(census_rates_feature.hh_agg_veh_rate)
                    or 0,
                    truck_adjustment_factor=0.031,
                    total_employment=float(total_employment['emp__sum']) or 0)

                # run raw trip generation
                vmt_feature_trips = generate_raw_trips(vmt_feature)

                # run trip purpose splits
                vmt_feature_trip_purposes = calculate_trip_purpose_splits(
                    vmt_feature_trips)

                # run log odds
                vmt_feature_log_odds = calculate_log_odds(
                    vmt_feature_trip_purposes)

                # run vmt equations
                vmt_output = calculate_final_vmt_results(vmt_feature_log_odds)

                # filters the vmt feature dictionary for specific output fields for writing to the database
                output_list = map(lambda key: vmt_output[key],
                                  vmt_output_field_list)
                vmt_output_list.append(output_list)
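A note on the float(...) or 0 idiom that recurs throughout these examples: it normalizes a converted 0.0, but it does not guard against NULL columns, because float(None) raises TypeError before the or is evaluated. A minimal helper (hypothetical, not part of the footprint source) makes that guard explicit:

# Hypothetical NULL-safe variant of the float(...) or 0 pattern above.
def to_float(value, default=0.0):
    """Convert value to float, returning default for None or unparseable input."""
    try:
        return float(value)
    except (TypeError, ValueError):
        return default

# e.g. qmb_du_total=to_float(vmt_variables_feature.du_qtrmi)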
Example #21
    def run_base_water_calculations(self):

        features = self.base_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))

        annotated_features = annotated_related_feature_class_pk_via_geographies(features, self.config_entity, [
            DbEntityKey.CLIMATE_ZONES])

        water_output_list = []

        options = dict(
            water_result_table=self.water_class.db_entity_key,
            water_schema=parse_schema_and_table(self.water_class._meta.db_table)[0],
            base_table=self.base_class.db_entity_key,
            base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
        )

        for feature in annotated_features.iterator():
            self.result_dict = defaultdict(lambda: float(0))
            self.feature = feature

            if feature.climate_zones:
                climate_zone_feature = self.climate_zone_class.objects.get(id=feature.climate_zones)
            else:
                logger.warn("No Climate Zone intersection for feature id {0} or check geography relation table".format(feature.id))
                continue

            hh_factor = (feature.pop / feature.hh) * (feature.hh / feature.du) if (feature.du > 0 and feature.hh > 0) else 0

            self.feature_dict = dict(
                id=feature.id,
                pop=float(feature.pop),
                hh=float(feature.hh),
                emp=float(feature.emp),

                evapotranspiration_zone=climate_zone_feature.evapotranspiration_zone.zone,
                annual_evapotranspiration=float(climate_zone_feature.evapotranspiration_zone.annual_evapotranspiration),

                residential_irrigated_sqft=float(feature.residential_irrigated_sqft),
                commercial_irrigated_sqft=float(feature.commercial_irrigated_sqft),

                du_detsf_ll=float(feature.du_detsf_ll * hh_factor),
                du_detsf_sl=float(feature.du_detsf_sl * hh_factor),
                du_attsf=float(feature.du_attsf * hh_factor),
                du_mf=float(feature.du_mf * hh_factor),

                retail_services=float(feature.emp_retail_services),
                restaurant=float(feature.emp_restaurant),
                accommodation=float(feature.emp_accommodation),
                arts_entertainment=float(feature.emp_arts_entertainment),
                other_services=float(feature.emp_other_services),
                office_services=float(feature.emp_office_services),
                public_admin=float(feature.emp_public_admin),
                education=float(feature.emp_education),
                medical_services=float(feature.emp_medical_services),
                wholesale=float(feature.emp_wholesale),
                transport_warehousing=float(feature.emp_transport_warehousing),
                manufacturing=float(feature.emp_manufacturing),
                construction=float(feature.emp_construction),
                utilities=float(feature.emp_utilities),
                agriculture=float(feature.emp_agriculture),
                extraction=float(feature.emp_extraction),
                military=float(feature.emp_military)
            )

            self.calculate_base_water()
            self.calculate_visualized_field()
            output_row = map(lambda key: self.result_dict[key], self.output_fields)
            water_output_list.append(output_row)

        return water_output_list, options
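The hh_factor above multiplies household occupancy (hh / du) by average household size (pop / hh), which algebraically reduces to persons per dwelling unit (pop / du); the guard returns 0 when du or hh is zero. A quick worked check with illustrative values:

# Worked check of the hh_factor arithmetic (values are illustrative only).
pop, hh, du = 250.0, 100.0, 120.0
hh_factor = (pop / hh) * (hh / du) if (du > 0 and hh > 0) else 0
assert abs(hh_factor - pop / du) < 1e-9  # reduces to persons per dwelling unit
print(hh_factor)  # 2.0833...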
Example #22
    def run_future_water_calculations(self, **kwargs):

        self.base_year = self.config_entity.scenario.project.base_year
        self.future_year = self.config_entity.scenario.year
        self.increment = self.future_year - self.base_year
        self.annualize_efficiencies()

        features = self.end_state_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))

        annotated_features = annotated_related_feature_class_pk_via_geographies(features, self.config_entity, [
            DbEntityKey.BASE_CANVAS, DbEntityKey.CLIMATE_ZONES])

        water_output_list = []

        options = dict(
            water_result_table=self.water_class.db_entity_key,
            water_schema=parse_schema_and_table(self.water_class._meta.db_table)[0],
            base_table=self.base_class.db_entity_key,
            base_schema=parse_schema_and_table(self.base_class._meta.db_table)[0],
        )
        approx_fifth = int(annotated_features.count() / 14 - 1) if annotated_features.count() > 30 else 1
        i = 1
        for feature in annotated_features.iterator():
            self.feature = feature
            self.result_dict = defaultdict(lambda: float(0))

            if i % approx_fifth == 0:
                self.report_progress(0.05, **kwargs)

            base_feature = self.base_class.objects.get(id=feature.base_canvas)
            if feature.climate_zones:
                climate_zone_feature = self.climate_zone_class.objects.get(id=feature.climate_zones)
            else:
                logger.warn("No Climate Zone intersection for feature id {0} or check geography relation table".format(feature.id))
                continue

            self.feature_dict = dict(
                id=feature.id,
                pop=float(feature.pop),
                hh=float(feature.hh),
                emp=float(feature.emp),

                evapotranspiration_zone=climate_zone_feature.evapotranspiration_zone.zone,
                annual_evapotranspiration=float(climate_zone_feature.evapotranspiration_zone.annual_evapotranspiration),
            )

            for use in 'residential', 'commercial':
                key = "{use}_irrigated_sqft".format(use=use)
                self.feature_dict.update({
                    key + "_redev": self.redev_units(key, feature, base_feature),
                    key + "_new": self.new_units(key, feature, base_feature),
                    key + "_base": float(getattr(base_feature, key))
                })

            future_residential_factor = feature.hh / feature.du * feature.pop / feature.hh if feature.hh > 0 else 0
            base_residential_factor = base_feature.hh / base_feature.du * base_feature.pop / base_feature.hh if base_feature.hh else 0

            for key in self.RESIDENTIAL_TYPES:
                self.feature_dict.update({
                    key + "_redev": self.redev_units(key, feature, base_feature) * float(base_residential_factor),
                    key + "_new": self.new_units(key, feature, base_feature) * float(future_residential_factor),
                    key + "_base": float(getattr(base_feature, key)) * float(base_residential_factor)
                })

            for key in self.COMMERCIAL_TYPES:
                self.feature_dict.update({
                    key + "_redev": self.redev_units("emp_" + key, feature, base_feature) * float(base_residential_factor),
                    key + "_new": self.new_units("emp_" + key, feature, base_feature) * float(future_residential_factor),
                    key + "_base": float(getattr(base_feature, "emp_" +key)) * float(base_residential_factor)
                })

            self.calculate_future_water()
            self.calculate_visualized_field()

            output_row = map(lambda key: self.result_dict.get(key), self.output_fields)
            water_output_list.append(output_row)
            i += 1

        return water_output_list, options
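Despite its name, approx_fifth above is the feature interval between progress callbacks: with count / 14 - 1, the report_progress(0.05, ...) calls fire roughly 14 times (about 0.7 of the progress bar). A quick check with an illustrative count:

# Illustrative check of the progress-interval arithmetic used above.
count = 1400
approx_fifth = int(count / 14 - 1) if count > 30 else 1
ticks = sum(1 for i in range(1, count + 1) if i % approx_fifth == 0)
print(approx_fifth, ticks)  # 99 14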
def dynamic_model_table_exists(model_class):
    #logger.debug('Checking model class table {model_class} exists'.format(model_class=model_class._meta.db_table))
    return InformationSchema.objects.table_exists(*parse_schema_and_table(model_class._meta.db_table))
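Every example unpacks parse_schema_and_table(model._meta.db_table) into a (schema, table) pair. Assuming it is a plain split on the qualifying dot (the real footprint helper may also handle quoted identifiers), a stand-in would look like:

# Hypothetical stand-in for parse_schema_and_table; assumes 'schema.table'
# input and falls back to the public schema for unqualified names.
def parse_schema_and_table(qualified_name):
    schema, _, table = qualified_name.partition('.')
    return (schema, table) if table else ('public', schema)

schema, table = parse_schema_and_table('scenario_1.base_canvas')
print(schema, table)  # scenario_1 base_canvas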
Example #24
    def update(self, **kwargs):

        """
            This function handles the update or creation on the environmental constraints geography producing the area
            for each layer with the environmental constraint behavior. This function will both add and remove
            constraints and produce the final constraints layer in the primary geography of the active scenario
        """
        # TODO : remove hard-coded 3310 (only works in CA), need to set an "analysis projection" in the Region
        start_time = time.time()

        current_db_entities = \
            set(self.config_entity.db_entities_having_behavior_key(BehaviorKey.Fab.ricate('environmental_constraint')))

        base_feature_class = self.config_entity.db_entity_feature_class(
            DbEntityKey.BASE_CANVAS)

        options = dict(
            project_schema=parse_schema_and_table(base_feature_class._meta.db_table)[0],
            base_table=base_feature_class.db_entity_key
        )

        logger.info('Inserting raw geographies into the environmental constraint geographies table for DbEntities: %s' % \
                    ', '.join(map(lambda db_entity: db_entity.name, current_db_entities)))

        drop_table('{project_schema}.environmental_constraint_geographies_table'.format(
            project_schema=options['project_schema'])
        )

        current_environmental_constraints = []
        for db_entity in current_db_entities:
            constraint_class = self.config_entity.db_entity_feature_class(db_entity.key)
            current_environmental_constraints.append(constraint_class.db_entity_key)

        create_id_field_format = create_sql_calculations(current_environmental_constraints, '{0}_id int')
        insert_id_field_format = create_sql_calculations(current_environmental_constraints, '{0}_id')

        pSql = '''
        create table {project_schema}.environmental_constraint_geographies_table
            (primary_id integer, wkb_geometry geometry, {create_id_field_format});
        SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table', 'wkb_geometry', 3310)

        '''.format(project_schema=options['project_schema'], create_id_field_format=create_id_field_format)

        execute_sql(pSql)

        for db_entity in current_db_entities:
            logger.info('Inserting into environmental constraint geographies table for DbEntity: %s' % db_entity.full_name)

            constraint_class = self.config_entity.db_entity_feature_class(db_entity.key)

            pSql = '''
                insert into {project_schema}.environmental_constraint_geographies_table (primary_id, wkb_geometry, {constraint_db_entity_key}_id) select
                    cast(primary_id as int), wkb_geometry, {constraint_db_entity_key}_id from (
                    select
                        id as primary_id,
                        {constraint_db_entity_id} as {constraint_db_entity_key}_id,
                        st_setSRID(st_transform(st_buffer((st_dump(wkb_geometry)).geom, 0), 3310), 3310) as wkb_geometry

                    from (
                        select b.id, st_intersection(a.wkb_geometry, b.wkb_geometry) as wkb_geometry
	                    from {constraint_schema}.{constraint_db_entity_key} a,
                        {project_schema}.{base_table} b
                            where st_intersects(a.wkb_geometry, b.wkb_geometry)) as intersection
                    ) as polygons;
                '''.format(
                project_schema=options['project_schema'],
                base_table=options['base_table'],
                constraint_schema=parse_schema_and_table(constraint_class._meta.db_table)[0],
                constraint_db_entity_key=constraint_class.db_entity_key,
                constraint_db_entity_id=db_entity.id
            )

            execute_sql(pSql)

            logger.info('finished inserting db_entity: {db_entity} {time} elapsed'.format(
                time=time.time() - start_time,
                db_entity=constraint_class.db_entity_key))

        # Only regenerate the merged environmental constraint whenever an environmental
        # constraint is added or removed from the layer

        add_geom_idx(options['project_schema'], 'environmental_constraint_geographies_table')

        logger.info('Unioning all environmental constraint geographies')
        drop_table('{project_schema}.environmental_constraint_geographies_table_unioned'.format(
            project_schema=options['project_schema'])
        )

        pSql = '''
            CREATE TABLE {project_schema}.environmental_constraint_geographies_table_unioned
                (id serial, wkb_geometry geometry, acres float, primary_id int, {create_id_field_format});
            SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table_unioned', 'wkb_geometry', 3310);
        '''.format(project_schema=options['project_schema'], create_id_field_format=create_id_field_format)

        execute_sql(pSql)

        pSql = '''
        insert into {project_schema}.environmental_constraint_geographies_table_unioned (wkb_geometry, acres, primary_id, {insert_id_field_format})
               SELECT
                    st_buffer(wkb_geometry, 0) as wkb_geometry,
                    st_area(st_buffer(wkb_geometry, 0)) * 0.000247105 as acres,
                    primary_id, {insert_id_field_format}

                    FROM (
                        SELECT
                            (ST_Dump(wkb_geometry)).geom as wkb_geometry,
                            primary_id, {insert_id_field_format}

                        FROM (
                            SELECT ST_Polygonize(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}   FROM (
                                SELECT ST_Collect(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}   FROM (
                                    SELECT ST_ExteriorRing(wkb_geometry) AS wkb_geometry, primary_id, {insert_id_field_format}
                                        FROM {project_schema}.environmental_constraint_geographies_table) AS lines
                                            group by primary_id, {insert_id_field_format}) AS noded_lines
                                                group by primary_id, {insert_id_field_format}) as polygons
                    ) as final
                WHERE st_area(st_buffer(wkb_geometry, 0)) > 5;'''.format(
            project_schema=options['project_schema'],
            insert_id_field_format=insert_id_field_format
        )

        execute_sql(pSql)

        logger.info('finished unioning env constraints: {time} elapsed'.format(
            time=time.time() - start_time))

        #reproject table back to 4326 for integration with web viewing
        pSql = '''
        SELECT UpdateGeometrySRID('{project_schema}', 'environmental_constraint_geographies_table_unioned', 'wkb_geometry', 4326);
        update {project_schema}.environmental_constraint_geographies_table_unioned a set wkb_geometry = st_transform(st_buffer(wkb_geometry, 0), 4326);
        '''.format(
            project_schema=options['project_schema']
        )
        execute_sql(pSql)

        add_geom_idx(options['project_schema'], 'environmental_constraint_geographies_table_unioned')

        logger.info('Env Union Finished: %s' % str(time.time() - start_time))
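The unioned-table insert above is a standard PostGIS rebuild idiom: dump each polygon, take its exterior ring, collect the rings per group, polygonize them back into planar faces, then dump the faces and drop slivers (st_area > 5, in square meters because the working SRID 3310 is metric); the st_buffer(..., 0) calls repair any invalid geometry along the way. A standalone sketch with a hypothetical table name:

# Standalone sketch of the rings -> ST_Polygonize -> ST_Dump idiom; note that
# ST_Polygonize expects correctly noded linework.
pSql = '''
    SELECT (ST_Dump(ST_Polygonize(ring))).geom AS wkb_geometry
    FROM (
        SELECT ST_ExteriorRing((ST_Dump(wkb_geometry)).geom) AS ring
        FROM my_schema.overlapping_polygons) AS rings;
'''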
Example #25
    def update(self, **kwargs):
        """
        Executes the public health module functions
        """
        public_health_variables_class = self.config_entity.db_entity_feature_class(DbEntityKey.PH_VARIABLES)
        uf_150mgrid_class = self.config_entity.db_entity_feature_class(DbEntityKey.GRID_150M)

        census_rate_class = self.config_entity.db_entity_feature_class(DbEntityKey.CENSUS_RATES)
        grid_outcome_class = self.config_entity.db_entity_feature_class(DbEntityKey.PH_GRID_OUTCOMES)
        block_group_outcome_class = self.config_entity.db_entity_feature_class(DbEntityKey.PH_BLOCK_GROUP_OUTCOMES)
        outcome_summary_class = self.config_entity.db_entity_feature_class(DbEntityKey.PH_OUTCOMES_SUMMARY)

        if isinstance(self.config_entity.subclassed, FutureScenario):
            scenario_class = self.config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
            trip_length_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_FUTURE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(DbEntityKey.FUTURE_TRANSIT_STOPS)

        else:
            scenario_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)
            trip_length_class = self.config_entity.db_entity_feature_class(DbEntityKey.VMT_BASE_TRIP_LENGTHS)
            transit_stop_class = self.config_entity.db_entity_feature_class(DbEntityKey.BASE_TRANSIT_STOPS)

        sql_config_dict = dict(
            uf_canvas_table=scenario_class.db_entity_key,
            uf_canvas_schema=parse_schema_and_table(scenario_class._meta.db_table)[0],
            public_health_variables_table=public_health_variables_class.db_entity_key,
            public_health_variables_schema=parse_schema_and_table(public_health_variables_class._meta.db_table)[0],
            source_grid_table=uf_150mgrid_class.db_entity_key,
            source_grid_schema=parse_schema_and_table(uf_150mgrid_class._meta.db_table)[0],
            transit_stop_table=transit_stop_class.db_entity_key,
            transit_stop_schema=parse_schema_and_table(transit_stop_class._meta.db_table)[0],
            census_rate_table=census_rate_class.db_entity_key,
            census_rate_schema=parse_schema_and_table(census_rate_class._meta.db_table)[0],
            trip_lengths_table=trip_length_class.db_entity_key,
            trip_lengths_schema=parse_schema_and_table(trip_length_class._meta.db_table)[0],
            grid_outcome_schema=parse_schema_and_table(grid_outcome_class._meta.db_table)[0],
            grid_outcome_table=grid_outcome_class.db_entity_key,
            block_group_outcome_schema=parse_schema_and_table(block_group_outcome_class._meta.db_table)[0],
            block_group_outcome_table=block_group_outcome_class.db_entity_key,
            outcome_summary_schema=parse_schema_and_table(outcome_summary_class._meta.db_table)[0],
            outcome_summary_table=outcome_summary_class.db_entity_key
        )

        self.run_public_health_preprocesses(sql_config_dict, **kwargs)
        read_UD4H_model_doc()
        self.read_public_health_models()

        all_features = public_health_variables_class.objects.filter(Q(pop__gt=0))
        output = []

        for feature in all_features:

            feature_dict = dict(
                id=feature.id,
                pop=feature.pop,
                pop_adult=feature.pop_adult,
                pop_adult_high=feature.pop_adult_high,
                pop_adult_med=feature.pop_adult_med,
                pop_adult_low=feature.pop_adult_low,
                pop_senior=feature.pop_senior,
                pop_teen=feature.pop_teen,
                pop_children=feature.pop_children,
                hh=feature.hh,
                gender2=feature.gender2,                   # = percent_female
                age_children=feature.age_children,         # = average_age
                age_adult=feature.age_adult,               # = average_age
                age_teens=feature.age_teens,               # = average_age
                age_seniors=feature.age_seniors,           # = average_age

                racehisp1=feature.racehisp1,               # = pop_white
                racehisp2=feature.racehisp2,               # = pop_black
                racehisp4=feature.racehisp4,               # = pop_asian
                racehisp97=feature.racehisp97,             # = pop_american_indian + pop_hawaiian_islander + pop_other_ethnicity
                emply2=feature.emply2,                  # = pop_age16_up - pop_employed
                educa2=feature.educa2,                  # = % pop_hs_diploma
                educa3=feature.educa3,                  # = % pop_some_college
                educa4=feature.educa4,                  # = % pop_college_degree
                educa5=feature.educa5,                 # = % pop_graduate_degree
                own2=feature.own2,                    # = percent renters
                hhsize=feature.hhsize,                 # = hh_avg_size
                hhveh=feature.hhveh,                    # = hh_avg_vehicles
                incom2=feature.incom2,                   # = income_10k_35k = hh_inc_10_20 + hh_inc_20_30 + hh_inc_30_40/2
                incom3=feature.incom3,                   # = income_35k_50k = hh_inc_30_40/2 + hh_inc_40_50
                incom4=feature.incom4,                   # = income_50k_75k = hh_inc_50_60 + hh_inc_60_75
                incom5=feature.incom5,                   # = income_75k_100k = hh_inc_75_100
                incom6=feature.incom6,                   # = income_100k_150k = hh_inc_100_125 + hh_inc_125_150
                incom7=feature.incom7,                   # = income_150k_plus = hh_inc_150
                child_any1=feature.child_any1,              # = % hh_with_children

                disabled1_children=feature.disabled1_children,     # = % pop with disability (excluding trouble walking)
                disabled1_teens=feature.disabled1_teens,        # = % pop with disability (excluding trouble walking)
                disabled1_adult=feature.disabled1_adult,        # = % pop with disability (excluding trouble walking)
                disabled1_seniors=feature.disabled1_seniors,      # = % pop with disability (excluding trouble walking)

                disabled2_children=feature.disabled2_children,      # = % pop with trouble walking
                disabled2_teens=feature.disabled2_teens,         # = % pop with trouble walking
                disabled2_adult=feature.disabled2_adult,         # = % pop with trouble walking
                disabled2_seniors=feature.disabled2_seniors,       # = % pop with trouble walking

                emply_hh=feature.emply_hh,                # = employed_pop / hh
                educa_hh2=feature.educa_hh2,               # = % hh with high school diploma
                educa_hh3=feature.educa_hh3,               # = % hh with some college
                educa_hh4=feature.educa_hh4,               # = % hh with college degree
                educa_hh5=feature.educa_hh5,               # = % hh with graduate degree

                # BE Vars
                bldg_sqft_res=feature.bldg_sqft_res,
                bldg_sqft_ret1=feature.bldg_sqft_ret1,
                bldg_sqft_off=feature.bldg_sqft_off,
                b1=feature.b1,
                b2=feature.b2,
                b3=feature.b3,
                b4=feature.b4,
                b5=feature.b5,
                a=feature.a,
                du_1km_tr=feature.du_1km_tr,
                resmix_dens=feature.resmix_dens,
                bldg_sqft_ret=feature.bldg_sqft_ret,
                far_nonres=feature.far_nonres,
                mix5=feature.mix5,
                rail_any=feature.rail_any,
                transit_distance=feature.transit_distance,
                transit_count=feature.transit_count,
                school_distance=feature.school_distance,
                retail_distance=feature.retail_distance,
                restaurant_distance=feature.restaurant_distance,
                intersection_density_sqmi=feature.intersection_density_sqmi,
                local_street=feature.local_street,
                major_street=feature.major_street,
                freeway_arterial_any=feature.freeway_arterial_any,
                park_open_space_distance=feature.park_open_space_distance,
                acres_parcel_park_open_space=feature.acres_parcel_park_open_space,
                du_variable=feature.du_variable,
                emp_variable=feature.emp_variable,
                res_index=feature.res_index,
                com_index=feature.com_index,
                park_access=feature.park_access,
                regional_access=feature.regional_access,
                network_index=feature.network_index,
                transit_access=feature.transit_access,
                majrd_index=feature.major_road_index,
                walk_index=feature.walk_index,
                walk_index_chts_senior_walk_any=feature.walk_index_chts_senior_walk_any,
                walk_index_chts_senior_auto_min=feature.walk_index_chts_senior_auto_min,
                walk_index_chts_teens_walk_any=feature.walk_index_chts_teens_walk_any,
                walk_index_chts_child_walk_any=feature.walk_index_chts_child_walk_any,
                walk_index_chts_adult_bike_any=feature.walk_index_chts_adult_bike_any,
                walk_index_chts_adult_walk_min=feature.walk_index_chts_adult_walk_min,
                walk_index_chts_senior_walk_min=feature.walk_index_chts_senior_walk_min,
                walk_index_chts_teens_walk_min=feature.walk_index_chts_teens_walk_min,
                walk_index_chts_adult_auto_min=feature.walk_index_chts_adult_auto_min,
                walk_index_chis_adult_modPA_any=feature.walk_index_chis_adult_modpa_any,
                walk_index_chis_adult_overweight=feature.walk_index_chis_adult_overweight,
                walk_index_chis_senior_overweight=feature.walk_index_chis_senior_overweight,
                walk_index_chis_adult_obese=feature.walk_index_chis_adult_obese,
                walk_index_chis_senior_gh=feature.walk_index_chis_senior_gh,
                walk_index_chis_senior_walk_le_min=feature.walk_index_chis_senior_walk_le_min,
                walk_index_chis_adult_modPA_min=feature.walk_index_chis_adult_modpa_min,
                walk_index_chis_adult_bmi=feature.walk_index_chis_adult_bmi,
                walk_index_chis_child_PA60=feature.walk_index_chis_child_pa60,
                walk_index_chis_child_walk_school=feature.walk_index_chis_child_walk_school
            )

            model_result = self.apply_public_health_models_to_feature(feature_dict)
            result_minutes = self.populate_output_minutes(model_result)
            output_dict = map(lambda key: result_minutes[key.lower()], self.outcome_fields)
            output.append(output_dict)
        self.public_health_progress(0.1, **kwargs)
        self.write_results_to_database(sql_config_dict, output)
        self.public_health_progress(0.1, **kwargs)
        self.aggregate_results_to_block_group(sql_config_dict)
        self.public_health_progress(0.1, **kwargs)
        self.aggregate_results_to_outcomes_summary_table(sql_config_dict)
        self.public_health_progress(0.15, **kwargs)
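These examples are Python 2 (note unicode and the list-returning filter elsewhere), so map() here yields a concrete list whose values follow the order of self.outcome_fields; that list is what gets appended as a database row. A Python 3 port would need an explicit list() wrapper:

# Minimal illustration with hypothetical field names: each output row holds
# the result_minutes values ordered by outcome_fields (Python 3 needs list()).
outcome_fields = ['MINUTES_WALK', 'MINUTES_BIKE']
result_minutes = {'minutes_walk': 12.5, 'minutes_bike': 3.1}
output_row = list(map(lambda key: result_minutes[key.lower()], outcome_fields))
print(output_row)  # [12.5, 3.1]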
Example #26
    def get_fields(cls, table_name):
        """
            Modifies the original handle_inspection to instead get all the fields of the table described by the db_entity
        :param db_entity:
        :return: A dict keyed by field named and valued by models.Field or variant instance
        """

        connection = connections['default']

        table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')

        cursor = connection.cursor()

        try:
            relations = connection.introspection.get_relations(
                cursor, table_name)
        except NotImplementedError:
            relations = {}
        try:
            indexes = connection.introspection.get_indexes(cursor, table_name)
        except NotImplementedError:
            indexes = {}

        schema, table = parse_schema_and_table(table_name)
        # Fill this dict with field definitions
        fields = {}
        for i, row in enumerate(
                cls.get_table_description(cursor, table, schema, connection)):
            column_name = row[0]
            att_name = column_name.lower()
            comment_notes = []  # Holds Field notes, to be displayed in a Python comment.
            extra_params = {}  # Holds Field parameters such as 'db_column'.

            # If the column name can't be used verbatim as a Python
            # attribute, set the "db_column" for this Field.
            if ' ' in att_name or '-' in att_name or keyword.iskeyword(
                    att_name) or column_name != att_name:
                extra_params['db_column'] = column_name

            # Add primary_key and unique, if necessary.
            if column_name in indexes:
                if indexes[column_name]['primary_key']:
                    extra_params['primary_key'] = True
                elif indexes[column_name]['unique']:
                    extra_params['unique'] = True

            # Modify the field name to make it Python-compatible.
            if ' ' in att_name:
                att_name = att_name.replace(' ', '_')
                comment_notes.append('Field renamed to remove spaces.')

            if '-' in att_name:
                att_name = att_name.replace('-', '_')
                comment_notes.append('Field renamed to remove dashes.')

            if column_name != att_name:
                comment_notes.append('Field name made lowercase.')

            if i in relations:
                rel_to = "'cls'" if relations[i][1] == table_name else table2model(relations[i][1])
                field_type = 'ForeignKey(%s' % rel_to
                if att_name.endswith('_id'):
                    att_name = att_name[:-3]
                else:
                    extra_params['db_column'] = column_name
            else:
                # Calling `get_field_type` to get the field type string and any
                # additional parameters and notes.
                field_type, field_params, field_notes = cls.get_field_type(
                    connection, table_name, row)
                extra_params.update(field_params)
                comment_notes.extend(field_notes)

                field_type += '('

            if keyword.iskeyword(att_name):
                att_name += '_field'
                comment_notes.append(
                    'Field renamed because it was a Python reserved word.')

            if att_name[0].isdigit():
                att_name = 'number_%s' % att_name
                extra_params['db_column'] = unicode(column_name)
                comment_notes.append("Field renamed because it wasn't a "
                                     "valid Python identifier.")

            # Don't output 'id = meta.AutoField(primary_key=True)', because
            # that's assumed if it doesn't exist.
            if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
                continue

            # Add 'null' and 'blank', if the 'null_ok' flag was present in the
            # table description.
            if row[6]:  # If the column allows NULL...
                extra_params['blank'] = True
                # Don't know why these are here, commenting out
                #if not field_type in ('TextField(', 'CharField('):
                extra_params['null'] = True

            field_desc = 'models.%s' % field_type
            if extra_params:
                if not field_desc.endswith('('):
                    field_desc += ', '
                field_desc += ', '.join(
                    ['%s=%r' % (k, v) for k, v in extra_params.items()])
            field_desc += ')'
            if comment_notes:
                field_desc += ' # ' + ' '.join(comment_notes)
            # Set the dict key/value to the field name and the evaluated field description
            fields[att_name] = eval(field_desc)
            fields[att_name].name = att_name
        return fields
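get_fields assembles each Django field declaration as source text (e.g. "models.FloatField(null=True, blank=True)") and eval()s it. For the simple, non-ForeignKey branch, an eval-free sketch, assuming field_type is a django.db.models class name followed by '(':

# Eval-free sketch for the non-relation branch above: look the field class up
# on django.db.models instead of evaluating generated source text.
from django.db import models

def build_field(field_type, extra_params):
    field_class = getattr(models, field_type.rstrip('('))
    return field_class(**extra_params)

# build_field('FloatField(', {'db_column': 'Acres', 'null': True, 'blank': True})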
    def update(self, **kwargs):

        logger.debug("Executing Environmental Constraints using {0}".format(
            self.config_entity))
        config_entity = self.config_entity
        end_state_feature_class = config_entity.db_entity_feature_class(
            DbEntityKey.END_STATE)
        base_table = config_entity.db_entity_feature_class(
            DbEntityKey.BASE_CANVAS)

        options = dict(
            project_schema=parse_schema_and_table(base_table._meta.db_table)[0],
            scenario_schema=parse_schema_and_table(end_state_feature_class._meta.db_table)[0],
            end_state_table=end_state_feature_class.db_entity_key)

        current_db_entities, db_entities_to_add, db_entities_to_delete = \
            self.update_or_create_environmental_constraint_percents(config_entity)

        current_environmental_constraints = []
        logger.debug("Current db_entities {0}".format(current_db_entities))
        for db_entity in current_db_entities:
            logger.debug("Active db_entity {0}".format(db_entity.key))
            constraint_class = config_entity.db_entity_feature_class(
                db_entity.key)
            environmental_constraint_percent = EnvironmentalConstraintPercent.objects.filter(
                db_entity_id=db_entity.id, analysis_tool_id=self.id)[0]
            current_environmental_constraints.append(
                dict(key=constraint_class.db_entity_key,
                     priority=environmental_constraint_percent.priority,
                     percent=environmental_constraint_percent.percent))

        pSql = '''
        DO $$
            BEGIN
                BEGIN
                    ALTER TABLE {project_schema}.environmental_constraint_geographies_table_unioned ADD COLUMN constraint_acres_{config_entity_id} float;
                EXCEPTION
                    WHEN duplicate_column
                        THEN -- do nothing;
                END;
            END;
        $$'''.format(project_schema=options['project_schema'],
                     config_entity_id=self.config_entity.id)
        execute_sql(pSql)

        logger.info(
            'Calculate constraint acreage for the active scenario end state feature'
        )
        for db_entity in current_db_entities:
            constraint_class = self.config_entity.db_entity_feature_class(
                db_entity.key)
            environmental_constraint_percent = EnvironmentalConstraintPercent.objects.filter(
                db_entity_id=db_entity.id, analysis_tool_id=self.id)[0]
            constraint_percent = environmental_constraint_percent.percent
            active_constraint = filter(
                lambda dct: constraint_class.db_entity_key in dct['key'],
                current_environmental_constraints)[0]
            priority_constraints = filter(
                lambda dct: dct['priority'] < active_constraint['priority'] or
                (dct['priority'] == active_constraint['priority'] and dct[
                    'percent'] > active_constraint['percent']),
                current_environmental_constraints)

            priority_key_list = []
            for constraint in priority_constraints:
                priority_key_list.append(constraint['key'])

            priority_query = create_sql_calculations(
                priority_key_list, ' and {0}_id is null',
                ' and a.primary_id is not null')

            pSql = '''
            update {project_schema}.environmental_constraint_geographies_table_unioned a set
                constraint_acres_{config_entity_id} = acres * {percent} where {constraint}_id = {constraint_id} {priority_query};
            '''.format(project_schema=options['project_schema'],
                       constraint=constraint_class.db_entity_key,
                       constraint_id=db_entity.id,
                       percent=constraint_percent,
                       priority_query=priority_query,
                       config_entity_id=self.config_entity.id)

            execute_sql(pSql)

        pSql = '''
        update {scenario_schema}.{end_state_table} a set
            acres_developable = a.acres_gross - b.constraint_acres
            FROM
            (select primary_id,
                    sum(constraint_acres_{config_entity_id}) as constraint_acres
                from {project_schema}.environmental_constraint_geographies_table_unioned
                    where constraint_acres_{config_entity_id} is not null group by primary_id) b
        where a.id= b.primary_id;
        '''.format(scenario_schema=options['scenario_schema'],
                   project_schema=options['project_schema'],
                   end_state_table=options['end_state_table'],
                   config_entity_id=self.config_entity.id)

        execute_sql(pSql)

        pSql = '''
        update {scenario_schema}.{end_state_table}
        set developable_proportion = (
            case when acres_gross > 0 then acres_developable / acres_gross else 0 end
        )
        '''.format(scenario_schema=options['scenario_schema'],
                   end_state_table=options['end_state_table'])
        execute_sql(pSql)
Example #28
0
    def update(self, **kwargs):

        logger.debug("Executing Environmental Constraints using {0}".format(self.config_entity))
        config_entity = self.config_entity
        end_state_feature_class = config_entity.db_entity_feature_class(DbEntityKey.END_STATE)
        base_table = config_entity.db_entity_feature_class(DbEntityKey.BASE_CANVAS)

        options = dict(
            project_schema=parse_schema_and_table(base_table._meta.db_table)[0],
            scenario_schema=parse_schema_and_table(end_state_feature_class._meta.db_table)[0],
            end_state_table=end_state_feature_class.db_entity_key
        )

        current_db_entities, db_entities_to_add, db_entities_to_delete = \
            self.update_or_create_environmental_constraint_percents(config_entity)

        current_environmental_constraints = []
        logger.debug("Current db_entities {0}".format(current_db_entities))
        for db_entity in current_db_entities:
            logger.debug("Active db_entity {0}".format(db_entity.key))
            constraint_class = config_entity.db_entity_feature_class(db_entity.key)
            environmental_constraint_percent = EnvironmentalConstraintPercent.objects.filter(
                db_entity_id=db_entity.id,
                analysis_tool_id=self.id)[0]
            current_environmental_constraints.append(
                dict(
                    key=constraint_class.db_entity_key,
                    priority=environmental_constraint_percent.priority,
                    percent=environmental_constraint_percent.percent
                )
            )

        pSql = '''
        DO $$
            BEGIN
                BEGIN
                    ALTER TABLE {project_schema}.environmental_constraint_geographies_table_unioned ADD COLUMN constraint_acres_{config_entity_id} float;
                EXCEPTION
                    WHEN duplicate_column
                        THEN -- do nothing;
                END;
            END;
        $$'''.format(
            project_schema=options['project_schema'],
            config_entity_id=self.config_entity.id
        )
        execute_sql(pSql)
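        # Trapping duplicate_column above makes the ALTER TABLE idempotent:
        # rerunning the analysis leaves an existing per-scenario
        # constraint_acres_<config_entity_id> column untouched.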

        logger.info('Calculate constraint acreage for the active scenario end state feature')
        for db_entity in current_db_entities:
            constraint_class = self.config_entity.db_entity_feature_class(db_entity.key)
            environmental_constraint_percent = EnvironmentalConstraintPercent.objects.filter(
                db_entity_id=db_entity.id,
                analysis_tool_id=self.id)[0]
            constraint_percent = environmental_constraint_percent.percent
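            # Precedence rule: a constraint yields to any constraint with a
            # lower priority number, or the same priority and a larger percent.
            # priority_query (built below) excludes acreage those higher-
            # precedence constraints have already claimed.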
            active_constraint = filter(
                lambda dct: constraint_class.db_entity_key in dct['key'],
                current_environmental_constraints)[0]
            priority_constraints = filter(
                lambda dct: dct['priority'] < active_constraint['priority']
                or (dct['priority'] == active_constraint['priority']
                    and dct['percent'] > active_constraint['percent']),
                current_environmental_constraints)

            priority_key_list = []
            for constraint in priority_constraints:
                priority_key_list.append(constraint['key'])

            priority_query = create_sql_calculations(priority_key_list, ' and {0}_id is null', ' and a.primary_id is not null')

            pSql = '''
            update {project_schema}.environmental_constraint_geographies_table_unioned a set
                constraint_acres_{config_entity_id} = acres * {percent} where {constraint}_id = {constraint_id} {priority_query};
            '''.format(
                project_schema=options['project_schema'],
                constraint=constraint_class.db_entity_key,
                constraint_id=db_entity.id,
                percent=constraint_percent,
                priority_query=priority_query,
                config_entity_id=self.config_entity.id
            )

            execute_sql(pSql)

        # Roll the per-constraint acreages up to each end-state feature:
        # acres_developable = acres_gross minus the summed constrained acres.
        pSql = '''
        update {scenario_schema}.{end_state_table} a set
            acres_developable = a.acres_gross - b.constraint_acres
            FROM
            (select primary_id,
                    sum(constraint_acres_{config_entity_id}) as constraint_acres
                from {project_schema}.environmental_constraint_geographies_table_unioned
                    where constraint_acres_{config_entity_id} is not null group by primary_id) b
        where a.id = b.primary_id;
        '''.format(
            scenario_schema=options['scenario_schema'],
            project_schema=options['project_schema'],
            end_state_table=options['end_state_table'],
            config_entity_id=self.config_entity.id
        )

        execute_sql(pSql)

        pSql = '''
        update {scenario_schema}.{end_state_table}
        set developable_proportion = (
            case when acres_gross > 0 then acres_developable / acres_gross else 0 end
        )
        '''.format(
            scenario_schema=options['scenario_schema'],
            end_state_table=options['end_state_table']
        )
        execute_sql(pSql)
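
A small self-contained sketch (hypothetical keys and values, not from the
source) of the precedence rule used above, runnable on its own:

    constraints = [
        dict(key='wetlands', priority=1, percent=1.0),
        dict(key='floodplain', priority=2, percent=0.5),
        dict(key='steep_slopes', priority=2, percent=0.8),
    ]

    def winners_over(active, all_constraints):
        # Constraints whose claim beats `active`: a lower priority number wins
        # outright; ties are broken by the larger percent.
        return [c for c in all_constraints
                if c['priority'] < active['priority']
                or (c['priority'] == active['priority']
                    and c['percent'] > active['percent'])]

    for c in constraints:
        print([w['key'] for w in winners_over(c, constraints)])
    # []                              -> wetlands is never excluded
    # ['wetlands', 'steep_slopes']    -> floodplain yields to both
    # ['wetlands']                    -> steep_slopes yields only to wetlands
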
    def run_base_water_calculations(self):

        features = self.base_class.objects.filter(Q(du__gt=0) | Q(emp__gt=0))

        annotated_features = annotated_related_feature_class_pk_via_geographies(
            features, self.config_entity, [DbEntityKey.CLIMATE_ZONES])

        water_output_list = []

        options = dict(
            water_result_table=self.water_class.db_entity_key,
            water_schema=parse_schema_and_table(
                self.water_class._meta.db_table)[0],
            base_table=self.base_class.db_entity_key,
            base_schema=parse_schema_and_table(
                self.base_class._meta.db_table)[0],
        )

        for feature in annotated_features.iterator():
            self.result_dict = defaultdict(lambda: float(0))
            self.feature = feature

            if feature.climate_zones:
                climate_zone_feature = self.climate_zone_class.objects.get(
                    id=feature.climate_zones)
            else:
                logger.warn(
                    "No Climate Zone intersection for feature id {0}; "
                    "check the geography relation table".format(feature.id))
                continue

            # (pop / hh) * (hh / du) reduces algebraically to pop / du: the
            # average persons per dwelling unit, used below to weight each
            # dwelling-unit count by its population.
            hh_factor = ((feature.pop / feature.hh) * (feature.hh / feature.du)
                         if feature.du > 0 and feature.hh > 0 else 0)

            self.feature_dict = dict(
                id=feature.id,
                pop=float(feature.pop),
                hh=float(feature.hh),
                emp=float(feature.emp),
                evapotranspiration_zone=climate_zone_feature.evapotranspiration_zone.zone,
                annual_evapotranspiration=float(
                    climate_zone_feature.evapotranspiration_zone.annual_evapotranspiration),
                residential_irrigated_sqft=float(
                    feature.residential_irrigated_sqft),
                commercial_irrigated_sqft=float(
                    feature.commercial_irrigated_sqft),
                du_detsf_ll=float(feature.du_detsf_ll * hh_factor),
                du_detsf_sl=float(feature.du_detsf_sl * hh_factor),
                du_attsf=float(feature.du_attsf * hh_factor),
                du_mf=float(feature.du_mf * hh_factor),
                retail_services=float(feature.emp_retail_services),
                restaurant=float(feature.emp_restaurant),
                accommodation=float(feature.emp_accommodation),
                arts_entertainment=float(feature.emp_arts_entertainment),
                other_services=float(feature.emp_other_services),
                office_services=float(feature.emp_office_services),
                public_admin=float(feature.emp_public_admin),
                education=float(feature.emp_education),
                medical_services=float(feature.emp_medical_services),
                wholesale=float(feature.emp_wholesale),
                transport_warehousing=float(feature.emp_transport_warehousing),
                manufacturing=float(feature.emp_manufacturing),
                construction=float(feature.emp_construction),
                utilities=float(feature.emp_utilities),
                agriculture=float(feature.emp_agriculture),
                extraction=float(feature.emp_extraction),
                military=float(feature.emp_military))

            self.calculate_base_water()
            self.calculate_visualized_field()
            output_row = map(lambda key: self.result_dict[key],
                             self.output_fields)
            water_output_list.append(output_row)

        return water_output_list, options
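
A tiny worked check (hypothetical numbers) of the hh_factor algebra above:
whenever hh and du are non-zero, (pop / hh) * (hh / du) equals pop / du, the
average persons per dwelling unit.

    pop, hh, du = 250.0, 100.0, 120.0
    hh_factor = (pop / hh) * (hh / du)   # 2.5 * 0.8333... = 2.0833...
    assert abs(hh_factor - pop / du) < 1e-9

    du_mf = 48.0                         # hypothetical multifamily unit count
    print(du_mf * hh_factor)             # 100.0 persons attributed to those units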